Example 1
def test_feather_simple(plaw_test_data):

    orig_hdu, lowres_hdu, highres_hdu = plaw_test_data

    # HDU input
    combo = feather_simple(highres_hdu, lowres_hdu)

    # Projection input
    lowres_proj = Projection.from_hdu(lowres_hdu)
    highres_proj = Projection.from_hdu(highres_hdu)

    combo = feather_simple(highres_proj, lowres_proj)
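
The `plaw_test_data` fixture itself is not shown here. A minimal sketch of what
it could provide, assuming the `generate_test_fits`, `singledish_observe_image`,
and `interferometrically_observe_image` helpers used in Example 2 below are
importable from uvcombine's test utilities (the import path is an assumption):

import pytest
from astropy import units
from astropy.io import fits
from radio_beam import Beam


@pytest.fixture
def plaw_test_data():
    pixel_scale = 1 * units.arcsec
    highres_major = 2 * units.arcsec

    # "Truth" image with a power-law power spectrum
    orig_hdu = generate_test_fits(imsize=512, powerlaw=1.5,
                                  beamfwhm=highres_major,
                                  pixel_scale=pixel_scale,
                                  restfreq=100 * units.GHz,
                                  brightness_unit=units.Jy / units.sr)

    # Low-resolution (single-dish-like) image
    sd_beam = Beam(major=15 * units.arcsec)
    sd_data = singledish_observe_image(orig_hdu.data,
                                       pixel_scale=pixel_scale,
                                       beam=sd_beam,
                                       boundary='wrap')
    sd_header = orig_hdu.header.copy()
    sd_header.update(sd_beam.to_header_keywords())
    lowres_hdu = fits.PrimaryHDU(
        data=sd_data.value if hasattr(sd_data, "value") else sd_data,
        header=sd_header)

    # High-resolution (interferometer-like) image
    intf_data = interferometrically_observe_image(
        image=orig_hdu.data,
        pixel_scale=pixel_scale,
        largest_angular_scale=40 * units.arcsec,
        smallest_angular_scale=highres_major)[0].real
    highres_hdu = fits.PrimaryHDU(
        data=intf_data.value if hasattr(intf_data, "value") else intf_data,
        header=orig_hdu.header)

    return orig_hdu, lowres_hdu, highres_hdu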
Example 2
def image_sz512as_pl1p5_fwhm2as_scale1as(tmp_path):

    pixel_scale = 1 * units.arcsec
    restfreq = 100 * units.GHz

    highres_major = 2 * units.arcsec

    # Generate input image
    input_hdu = generate_test_fits(imsize=512, powerlaw=1.5,
                                   beamfwhm=highres_major,
                                   pixel_scale=pixel_scale,
                                   restfreq=restfreq,
                                   brightness_unit=units.Jy / units.sr)

    input_fn = tmp_path / "input_image_sz512as_pl1.5_fwhm2as_scale1as.fits"
    input_hdu.writeto(input_fn, overwrite=True)

    input_proj = Projection.from_hdu(input_hdu).to(units.Jy / units.beam)

    # Make Interferometric image
    intf_data = interferometrically_observe_image(image=input_hdu.data,
                                                  pixel_scale=pixel_scale,
                                                  largest_angular_scale=40 * units.arcsec,
                                                  smallest_angular_scale=highres_major)[0].real
    intf_hdu = fits.PrimaryHDU(data=intf_data.value if hasattr(intf_data, "value") else intf_data,
                               header=input_hdu.header)
    intf_proj = Projection.from_hdu(intf_hdu).to(units.Jy / units.beam)
    intf_fn = tmp_path / "input_image_sz512as_pl1.5_fwhm2as_scale1as_intf2to40as.fits"
    intf_proj.write(intf_fn, overwrite=True)

    # Make SD image
    sd_header = input_hdu.header.copy()

    major = 15*units.arcsec
    # Eff SD diam (to compare with CASA in troubleshooting)

    sd_beam = Beam(major=major)
    sd_header.update(sd_beam.to_header_keywords())

    sd_fn = tmp_path / "input_image_sz512as_pl1.5_fwhm2as_scale1as_sd15as.fits"
    sd_data = singledish_observe_image(input_hdu.data,
                                       pixel_scale=pixel_scale,
                                       beam=sd_beam,
                                       boundary='wrap')

    sd_hdu = fits.PrimaryHDU(data=sd_data.value if hasattr(sd_data, "value") else sd_data,
                             header=sd_header)
    sd_hdu.header.update(sd_beam.to_header_keywords())
    sd_proj = Projection.from_hdu(sd_hdu).to(units.Jy / units.beam)
    sd_proj.write(sd_fn, overwrite=True)

    return tmp_path, input_fn, intf_fn, sd_fn
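
Tests consume this fixture by naming it as an argument; pytest runs the fixture
and hands through its return tuple, as Example 29 below does:

def test_fixture_usage(image_sz512as_pl1p5_fwhm2as_scale1as):
    tmp_path, input_fn, intf_fn, sd_fn = image_sz512as_pl1p5_fwhm2as_scale1as
    assert input_fn.exists() and intf_fn.exists() and sd_fn.exists()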
Example 3
def convert_and_reproject(name, template=None, unit=None, order=1):
    """
    Helper for the moment1 hybrid routine. Reads in data, makes sure it is
    a Projection, converts units, and reprojects as necessary.
    """

    # Ensure the input is a Projection
    if name is not None:
        if isinstance(name, Projection):
            data = name
        elif isinstance(name, str):
            hdu_list = fits.open(name)
            data = Projection.from_hdu(hdu_list[0])
        else:
            raise ValueError("Input is not a string or a Projection")

        if unit is not None:
            data = data.to(unit)
        if template is not None:
            data = data.reproject(template.header, order=order)
    else:
        data = None

    return data
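
A short usage sketch for the helper above (the file name and template are
hypothetical):

from astropy import units as u
from astropy.io import fits
from spectral_cube import Projection

# hypothetical template map defining the target grid
template = Projection.from_hdu(fits.open('template_mom0.fits')[0])
mom1 = convert_and_reproject('galaxy_mom1.fits', template=template,
                             unit=u.km / u.s, order=1)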
Example 4
def sfr_get(gal, hdr=None):
    if isinstance(gal, Galaxy):
        name = gal.name.lower()
    elif isinstance(gal, str):
        name = gal.lower()
    else:
        raise ValueError("'gal' must be a str or Galaxy!")

    # M33 lives in extension 13 of a multi-extension file; the other
    # galaxies each have their own single-extension SFR map.
    if name == 'm33':
        filename = 'notphangsdata/cube.fits'
        ext = 13
    else:
        filename = 'phangsdata/sfr/' + name + '_sfr_fuvw4.fits'
        ext = 0
    if os.path.isfile(filename):
        sfr_map = Projection.from_hdu(fits.open(filename)[ext])
    else:
        print('WARNING: No SFR map was found!')
        return None

    if hdr is not None:
        sfr = sfr_map.reproject(hdr) # Msun/yr/kpc^2. See header.
                                     # https://www.aanda.org/articles/aa/pdf/2015/06/aa23518-14.pdf
    else:
        sfr = sfr_map
    return sfr
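
Hypothetical call, assuming `mom0` is an existing Projection whose grid we want
to match:

sfr = sfr_get('ngc0628', hdr=mom0.header)  # SFR surface density, Msun/yr/kpc^2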
Example 5
def test_SC_inputs():

    hdr['BUNIT'] = 'K'
    hdu = PrimaryHDU(img, header=hdr)

    proj = Projection.from_hdu(hdu)

    output = input_data(proj)

    npt.assert_equal(img, output["data"].value)
    assert output['data'].unit == u.K
    npt.assert_equal(proj.header, output["header"])

    slic = Slice.from_hdu(hdu)

    output = input_data(slic)

    npt.assert_equal(img, output["data"].value)
    assert output['data'].unit == u.K
    npt.assert_equal(slic.header, output["header"])
Example 7
def test_pspec(plotname="pspec_rnoise_beamsmooth_apodizetukey.pdf",
               size=256,
               powerlaw=3.,
               run_kwargs={
                   'verbose': False,
                   'apodize_kernel': 'tukey'
               },
               plot_kwargs={'fit_color': 'black'},
               beam_smooth=True,
               pixel_scale=2 * u.arcsec,
               bmin=8.09 * u.arcsec,
               bmaj=10.01 * u.arcsec,
               bpa=-12.9 * u.deg,
               restfreq=1.4 * u.GHz,
               bunit=u.K):
    from spectral_cube import Projection
    from radio_beam import Beam

    rnoise_img = make_extended(size, powerlaw)
    # Create a FITS HDU using the parameters passed to this function
    rnoise_hdu = create_fits_hdu(rnoise_img, pixel_scale, 2 * u.arcsec,
                                 rnoise_img.shape, restfreq, bunit)

    pspec = PowerSpectrum(rnoise_hdu)

    if beam_smooth:
        pencil_beam = Beam(0 * u.deg)
        rnoise_proj = Projection.from_hdu(rnoise_hdu).with_beam(pencil_beam)
        new_beam = Beam(bmaj, bmin, bpa)
        rnoise_conv = rnoise_proj.convolve_to(new_beam)

        # hdr = fits.Header(header)
        # rnoise_hdu = fits.PrimaryHDU(rnoise_img, header=hdr)
        pspec = PowerSpectrum(rnoise_conv)

    pspec.run(**run_kwargs)
    pspec.plot_fit(save_name=plotname, **plot_kwargs)

    return pspec
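
Because every argument has a default, the test doubles as a small driver; for
example (the plot name is hypothetical):

pspec = test_pspec(plotname="pspec_rnoise_check.pdf", beam_smooth=True,
                   bmaj=12. * u.arcsec, bmin=10. * u.arcsec)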
Example 8
    run_m33 = True

    # M31
    if run_m31:

        m31_cubename_K = f"{fifteenA_HI_BCtaper_wEBHIS_HI_file_dict['Cube'].rstrip('.fits')}_K.fits"

        m31_cube = SpectralCube.read(m31_cubename_K, use_dask=False)
        print(f'Opening cube {m31_cubename_K}')

        m31_vels = m31_cube.spectral_axis.to(u.m / u.s)

        # del m31_cube

        m31_mom0 = Projection.from_hdu(
            fits.open(fifteenA_HI_BCtaper_wEBHIS_HI_file_dict['Moment0'])).to(
                u.K * u.km / u.s)

        m31_multigauss_name = fifteenA_HI_BCtaper_04kms_data_wEBHIS_path(
            "individ_multigaussian_gausspy_fits_neighbcheck2_nomw.fits")
        # m31_multigauss_name = fifteenA_HI_BCtaper_04kms_data_wEBHIS_path("individ_multigaussian_gausspy_fits_neighbcheck2.fits")
        m31_multigauss_hdu = fits.open(m31_multigauss_name)

        m31_ngauss = np.isfinite(m31_multigauss_hdu[0].data).sum(0) // 3

        m31_thickHI_name = fifteenA_HI_BCtaper_04kms_data_wEBHIS_path(
            "individ_simplethick_HI_fits_5kms_centlimit.fits")
        m31_thickHI_hdu = fits.open(m31_thickHI_name)

        m31_thickHI80_name = fifteenA_HI_BCtaper_04kms_data_wEBHIS_path(
            "individ_simplethick_HI_fits_80kms_centlimit.fits")
Example 9
    fig2 = plt.figure()
    one_ax = fig2.add_subplot(111)

    for name, ax in zip(fitinfo_dict, axs):

        filename = f"{gal}_{fitinfo_dict[name]['filename_suffix']}"
        hdu = fits.open(osjoin(data_path, filename))

        # Multiple images in the PACS imgs
        if 'pacs' in name:
            data = hdu[0].data[0]
        else:
            data = hdu[0].data

        proj = Projection.from_hdu(fits.PrimaryHDU(data,
                                                   hdu[0].header))
        # Attach equiv Gaussian beam
        proj = proj.with_beam(fitinfo_dict[name]['beam'])
        # The convolved images should have all have a beam saved

        # Take minimal shape. Remove empty space.
        # Erode edges to avoid noisier region/uneven scans
        mask = np.isfinite(proj)
        mask = nd.binary_erosion(mask, np.ones((3, 3)), iterations=45)

        proj = proj[nd.find_objects(mask)[0]]

        # Save the cut-out, if it doesn't already exist
        # out_filename = "{}_cutout.fits".format(filename.rstrip(".fits"))

        # if not os.path.exists(osjoin(data_path, 'raw', out_filename)):
Example 10
plt.legend((r'Ratio Fit: $\sigma_{\rm CO} = 0.56\, \sigma_{\rm HI}$',
            r'$\sigma_{\rm CO} = \sigma_{\rm HI}$'),
           frameon=True,
           loc=(0.56, 0.6))

# plt.tight_layout()
plt.subplots_adjust(hspace=0.03, wspace=0.03)

plt.savefig(osjoin(fig_path, "sigma_HI_vs_H2_w_fit_cornerplot.png"))
plt.savefig(osjoin(fig_path, "sigma_HI_vs_H2_w_fit_cornerplot.pdf"))
plt.close()

# What does this relation look like for line widths from the second moment
co_lwidth = Projection.from_hdu(
    fits.open(
        iram_co21_14B088_data_path("m33.co21_iram.14B-088_HI.lwidth.fits"))[0])
hi_lwidth = Projection.from_hdu(
    fits.open(fourteenB_wGBT_HI_file_dict['LWidth'])[0])

co_lwidth_vals = co_lwidth.value[tab['ypts'][good_pts],
                                 tab['xpts'][good_pts]] / 1000.
hi_lwidth_vals = hi_lwidth.value[tab['ypts'][good_pts],
                                 tab['xpts'][good_pts]] / 1000.

# How bad is the relation between the 2nd moment line widths

hist2d(hi_lwidth_vals, co_lwidth_vals, bins=13, data_kwargs={"alpha": 0.5})
plt.plot([4, 16], [4. * slope_ratio, 16. * slope_ratio],
         '--',
         color=sb.color_palette()[1],
Example 11
restfreq = 100 * units.GHz

highres_major = 2 * units.arcsec

# Generate input image
input_hdu = generate_test_fits(imsize=512,
                               powerlaw=3.0,
                               beamfwhm=highres_major,
                               pixel_scale=pixel_scale,
                               restfreq=restfreq,
                               brightness_unit=units.Jy / units.sr)

input_fn = tmp_path / "input_image_sz512as_pl1.5_fwhm2as_scale1as.fits"
input_hdu.writeto(input_fn, overwrite=True)

input_proj = Projection.from_hdu(input_hdu).to(units.Jy / units.beam)

# Make Interferometric image
intf_data = interferometrically_observe_image(
    image=input_hdu.data,
    pixel_scale=pixel_scale,
    largest_angular_scale=40 * units.arcsec,
    smallest_angular_scale=highres_major)[0].real
intf_hdu = fits.PrimaryHDU(
    data=intf_data.value if hasattr(intf_data, "value") else intf_data,
    header=input_hdu.header)
intf_proj = Projection.from_hdu(intf_hdu).to(units.Jy / units.beam)
intf_fn = tmp_path / "input_image_sz512as_pl1.5_fwhm2as_scale1as_intf2to40as.fits"
intf_proj.write(intf_fn, overwrite=True)

# Make SD image
Example 12
        mask_hdu.data.sum(0) > 10)

    del maskint_hdu, mask_hdu

    # Load in PB plane to account for varying uncertainty
    pb = fits.open(fourteenA_HI_file_dict['PB'], mode='denywrite')
    pb_plane = pb[0].data[0].copy()
    del pb

    # Need peak temp and centroid maps.

    # peak_name = fourteenA_wEBHIS_HI_file_dict['PeakTemp']
    # peaktemp = Projection.from_hdu(fits.open(peak_name))

    vcent_name = fourteenA_wEBHIS_HI_file_dict['Moment1']
    vcent = Projection.from_hdu(fits.open(vcent_name)).to(u.km / u.s)

    noise_val = 0.72 * u.K

    # Set max number of gaussians to something ridiculous.
    # Just so we don't have a failure putting into the output array
    max_comp = 30

    err_map = noise_val / pb_plane

    params_name = fourteenA_HI_data_wEBHIS_path(
        "individ_multigaussian_gausspy_fits.fits", no_check=True)

    if run_fit:

        agd_kwargs = {
Example 13
sourcename = 'SourceI'
robust = 0.5
linename = 'NaClv=2_26-25'
naclfn = paths.dpath(
    'moments/Orion{1}_{0}_robust{robust}.maskedclarkclean10000_medsub_K_peak.fits'
).format(linename, sourcename, robust=robust)
linename = 'SiOv=0_8-7'
siov0fn = paths.dpath(
    'moments/Orion{1}_{0}_robust{robust}.maskedclarkclean10000_medsub_K_peak.fits'
).format(linename, sourcename, robust=robust)
linename = 'SiOv=5_8-7'
siov5fn = paths.dpath(
    'moments/Orion{1}_{0}_robust{robust}.maskedclarkclean10000_medsub_K_peak.fits'
).format(linename, sourcename, robust=robust)

siov0data = Projection.from_hdu(fits.open(siov0fn)[0])

mywcs = siov0data.wcs
pixscale = wcs.utils.proj_plane_pixel_area(mywcs)**0.5 * u.deg

imhalfsize = 0.2 * u.arcsec
pixhs = (imhalfsize / pixscale).decompose().value
assert 1000 > pixhs > 10
cy, cx = siov0data.shape[0] / 2., siov0data.shape[1] / 2.
siov0data = siov0data[int(cy - pixhs):int(cy + pixhs),
                      int(cx - pixhs):int(cx + pixhs)]

extent = [
    -siov0data.shape[1] / 2 * pixscale.to(u.arcsec).value,
    siov0data.shape[1] / 2 * pixscale.to(u.arcsec).value,
    -siov0data.shape[0] / 2 * pixscale.to(u.arcsec).value,
Example 14
for gal in gals:

    print("On {}".format(gal))

    for name in names:

        print("On {}".format(name))

        filename = "{0}_{1}_mjysr.fits".format(gal.lower(), name)

        if not os.path.exists(osjoin(data_path, gal, filename)):
            print("Could not find {}. Skipping".format(filename))
            continue

        hdu = fits.open(osjoin(data_path, gal, filename))
        proj = Projection.from_hdu(fits.PrimaryHDU(hdu[0].data.squeeze(),
                                                   hdu[0].header))

        out_filename = "{0}_{1}_mjysr.fits"\
            .format(gal.lower(), name)

        # Now open the kernel file
        kernfits_name = names[name][0]
        kernfits_ext = names[name][1]

        kernel_filename = osjoin(kern_path, kernfits_name)

        # kernel_filename already includes kern_path; don't join it again
        kern_proj = Projection.from_hdu(fits.open(kernel_filename)[kernfits_ext])

        img_scale = np.abs(proj_plane_pixel_scales(proj.wcs))[0]
        kern_scale = np.abs(proj_plane_pixel_scales(kern_proj.wcs))[0]
Example 15
    # NB: avoid str.rstrip(".fits"), which strips characters, not the suffix
    plot_savename = osjoin(plot_folder,
                           "{0}.1Dpspec_wbeam.pdf".format(filename.replace(".fits", "")))
    plt.savefig(plot_savename)

    plt.close()

    df = pd.DataFrame(fit_results, index=row_names)
    df.to_csv(osjoin(data_path, "M33_HI", "pspec_hi_m33_fit_results.csv"))


if do_makepspec_conv:

    gal = 'M33'

    hdu = fits.open(hi_name)

    proj = Projection.from_hdu(hdu)

    beam = Beam(41 * u.arcsec)

    proj_conv = proj.convolve_to(beam)

    pspec = PowerSpectrum(proj_conv, distance=840 * u.kpc)

    pspec.run(verbose=False, fit_2D=False)

    pspec.save_results(hi_pspec_name_conv)

if do_fitpspec_conv:

    # Fit the same as the dust column density model
Example 16
        spat_mask = mask_hdu.data.sum(0) > 20

        del mask_hdu

        # Load in PB plane to account for varying uncertainty
        pb = fits.open(fourteenB_HI_data_path("M33_14B-088_pbcov.fits"), mode='denywrite')
        # pb_plane = pb[0].data[0].copy()
        pb_plane = pb[0].data.copy()
        pb_plane = pb_plane[nd.find_objects(pb_plane > 0.5)[-1]]
        pb_plane[pb_plane < 0.5] = np.NaN
        del pb

        # Need peak temp and centroid maps.

        peak_name = fourteenB_wGBT_HI_file_dict['PeakTemp']
        peaktemp = Projection.from_hdu(fits.open(peak_name))

        vcent_name = fourteenB_wGBT_HI_file_dict['Moment1']
        vcent = Projection.from_hdu(fits.open(vcent_name)).to(u.km / u.s)

        # Noise lowered for the downsampled cube.
        noise_val = 2.8 * u.K / np.sqrt(2)

        err_map = noise_val / pb_plane

        params_array, uncerts_array, bic_array = \
            cube_fitter(downsamp_cube_name, fit_func_simple,
                        mask_name=None,
                        npars=4,
                        nfit_stats=3,
                        args=(),
Example 17
def makeGalaxyTable(galaxy, vtype, regridDir, outDir):
    '''
    Make a FITS table containing all lines from the stacking results for
    one galaxy.

    galaxy: row of data for the galaxy we are processing, from degas_base.fits

    vtype: velocity type that we are stacking on

    scriptDir: script directory (AAK: if degas_base.fits is read in early,
    do I still need to pass this down? Not currently in the signature.)

    regridDir: regrid directory

    outDir: output directory


    Date        Programmer      Description of Changes
    ----------------------------------------------------------------------
    10/29/2020  Yiqing Song     Original Code
    12/3/2020   A.A. Kepley     Added comments
    4/29/2021   A.A. Kepley     Moved all calculations for galaxy here
                                instead of repeating per line.
    5/6/2021    A.A. Kepley     Modified so that all lines are calculated at once.

    '''

    print("Processing " + galaxy['NAME'] + "\n")

    # Create associated maps needed for analysis.
    mom0cut, cubeCO, sn_mask = mapSN(galaxy, regridDir, outDir, sncut=3.0)
    stellarmap = mapStellar(galaxy, mom0cut, regridDir, outDir)
    sfrmap = mapSFR(galaxy, mom0cut, regridDir, outDir)
    ltirmap = mapLTIR(galaxy, mom0cut, regridDir, outDir)
    R_arcsec, R_kpc, R_r25 = mapGCR(galaxy, mom0cut)

    velocity_file = galaxy['NAME'] + '_12CO_' + vtype + '_regrid.fits'
    vhdu = fits.open(os.path.join(regridDir, velocity_file))
    velocity = Projection.from_hdu(vhdu)

    # read in HCN
    linefile = os.path.join(
        regridDir, galaxy['NAME'] +
        '_HCN_rebase3_smooth1.3_hanning1_maxnchan_smooth.fits')
    # linefile already includes regridDir; don't join it again
    cubeHCN = SpectralCube.read(linefile, mask=sn_mask)

    # read in HCO+
    linefile = os.path.join(
        regridDir,
        galaxy['NAME'] + '_HCOp_rebase3_smooth1.3_hanning1_smooth_regrid.fits')
    cubeHCOp = SpectralCube.read(linefile, mask=sn_mask)

    # For NGC6946, skip 13CO and C18O since we don't have that data.
    if galaxy['NAME'] != 'NGC6946':
        # read in 13CO
        linefile = os.path.join(
            regridDir, galaxy['NAME'] +
            '_13CO_rebase3_smooth1.3_hanning1_smooth_regrid.fits')
        cube13CO = SpectralCube.read(linefile, mask=sn_mask)

        # read in C18O
        linefile = os.path.join(
            regridDir, galaxy['NAME'] +
            '_C18O_rebase3_smooth1.3_hanning1_smooth_regrid.fits')
        cubeC18O = SpectralCube.read(linefile, mask=sn_mask)

    else:
        cube13CO = None
        cubeC18O = None

    #get the full stack result for each line
    full_stack = makeStack(galaxy,
                           regridDir,
                           outDir,
                           mom0cut=mom0cut,
                           cubeCO=cubeCO,
                           cubeHCN=cubeHCN,
                           cubeHCOp=cubeHCOp,
                           cube13CO=cube13CO,
                           cubeC18O=cubeC18O,
                           velocity=velocity,
                           sfrmap=sfrmap,
                           ltirmap=ltirmap,
                           stellarmap=stellarmap,
                           R_arcsec=R_arcsec)
    # remove stacks that don't have CO spectra
    nstack = len(full_stack)
    keepstack = np.full(nstack, True)

    for i in range(nstack):
        if np.all(full_stack['stack_profile_CO'][i] == 0):
            keepstack[i] = False

    full_stack = full_stack[keepstack]

    fit_plot_dir = os.path.join(outDir, 'spec_fits')
    if not os.path.exists(fit_plot_dir):
        os.mkdir(fit_plot_dir)

    full_stack = addIntegratedIntensity(full_stack, fit_plot_dir)

    # return the table and the stack.
    return full_stack
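
A usage sketch, assuming degas_base.fits is an astropy Table with one row per
galaxy and that the regrid and output directories already exist:

from astropy.table import Table

degas = Table.read('degas_base.fits')          # assumed catalog location
galaxy = degas[degas['NAME'] == 'NGC2903'][0]  # hypothetical galaxy row
full_stack = makeGalaxyTable(galaxy, 'mom1', 'regrid/', 'output/')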
Example 18
    beamfwhm = 3 * u.arcsec
    imshape = rnoise_img.shape
    restfreq = 1.4 * u.GHz
    bunit = u.K

    plaw_hdu = create_fits_hdu(rnoise_img, pixel_scale, beamfwhm, imshape,
                               restfreq, bunit)

    pspec = PowerSpectrum(plaw_hdu)
    pspec.run(verbose=True, radial_pspec_kwargs={'binsize': 1.0},
              fit_kwargs={'weighted_fit': False}, fit_2D=False,
              low_cut=1. / (60 * u.pix),
              save_name=osjoin(fig_path, "rednoise_pspec_slope3.png"))

    pencil_beam = Beam(0 * u.deg)
    plaw_proj = Projection.from_hdu(plaw_hdu)
    plaw_proj = plaw_proj.with_beam(pencil_beam)

    new_beam = Beam(3 * plaw_hdu.header['CDELT2'] * u.deg)
    plaw_conv = plaw_proj.convolve_to(new_beam)

    plaw_conv.quicklook()
    plt.savefig('images/rednoise_slope3_img_smoothed.png')
    plt.close()

    pspec2 = PowerSpectrum(plaw_conv)
    pspec2.run(verbose=True, xunit=u.pix**-1, fit_2D=False,
               low_cut=0.025 / u.pix, high_cut=0.1 / u.pix,
               radial_pspec_kwargs={'binsize': 1.0},
               apodize_kernel='tukey')
    plt.axvline(np.log10(1 / 3.), color=col_pal[3], linewidth=8, alpha=0.8)
Example 19
hi_name = osjoin(data_path, "M31_HI",
                 "M31_14A_HI_contsub_width_04kms.image.pbcor.mom0.Kkms.fits")

co_name = osjoin(data_path, "M31_CO", "m31_iram_Kkms.fits")

dust_name = osjoin(
    data_path, "M31",
    r"m31_dust.surface.density_FB.beta=1.8_gauss46.3_regrid_bksub.fits")

co10_mass_conversion = 4.8 * (u.Msun / u.pc**2) / (u.K * u.km / u.s)

# Note that the top two conversions contain a 1.4x correction for He.
# So they will give the atomic mass, not the HI mass!
hi_mass_conversion = 0.0196 * (u.M_sun / u.pc**2) / (u.K * u.km / u.s)

hi_proj = Projection.from_hdu(fits.open(hi_name))

# Convolve co_proj to the HI beam
co_proj = Projection.from_hdu(fits.open(co_name)).to(u.K * u.km / u.s)
co_proj[np.isnan(co_proj)] = 0.
co_proj[co_proj.value < 0.] = 0.
co_proj = co_proj.convolve_to(hi_proj.beam)

co_proj = co_proj.reproject(hi_proj.header)

hdu_dust = fits.open(dust_name)

# The edges of the maps have high uncertainty. For M31, this may be altering
# the shape of the power-spectrum. Try removing these edges:
dust_mask = np.isfinite(hdu_dust[0].data[0].squeeze())
Example 20
def stack(line, cube, galaxy, vtype, basemap, basetype, bintype,
          sfrmap=None, weightmap=None, degasdir='./', datadir='./'):
    # Create bins by intensity or radius.
    # basemap is the map used to create the bins, i.e. mom0 or stellar mass.
    # Import the mom1 or peakvel FITS image to use as the stacking velocity.
    table = Table.read(degasdir + 'degas_base.fits')
    table = table[table['NAME'] == galaxy.upper()]
    Dmpc = table['DIST_MPC'][0]
    pix_area = (np.radians(np.abs(cube.header['CDELT1'])) * Dmpc * 1000)**2  # kpc^2
    nchan = cube.shape[0]
    velocity_file = galaxy + '_' + vtype + '.fits'
    vhdu = fits.open(datadir + velocity_file)
    velocity = Projection.from_hdu(vhdu)
    vtype = velocity_file.split('_')[1].split('.')[0]  # mom1 or peakvel
    binmap, binedge, binlabels = makeBins(galaxy, basemap, bintype)
    cmap = plotBins(galaxy, binmap, binedge, binlabels, bintype)  # plot binmap
    stack, labelvals = stacking.BinByLabel(cube, binmap.value, velocity,
                                           weight_map=weightmap)
    # unit for stellar mass needs to be updated!!
    xunit = {'intensity': 'K km/s', 'radius': 'R25', 'stellarmass': 'Msun/pc^2'}
    colors = cmap(np.linspace(0, 1, len(stack) + 1))
    bin_mean = np.zeros(len(stack))
    bin_lower = np.zeros(len(stack))
    bin_upper = np.zeros(len(stack))
    stacksum = np.zeros(len(stack))
    stacked_profile = np.zeros((len(stack), len(stack[0]['spectral_axis'])))
    binlabel = np.zeros(len(stack))
    stacknoise = np.zeros(len(stack))
    sfr_mean = np.zeros(len(stack))
    bin_area = np.zeros(len(stack))
    for i in range(len(stack)):
        d = stack[i]
        stacked_profile[i, :] = d['spectrum']
        # sum the intensity under the curve, K km/s
        integral = np.abs(np.trapz(y=d['spectrum'], x=d['spectral_axis']))
        noise_region = np.logical_or(d['spectral_axis'].value < -250,
                                     d['spectral_axis'].value > 250)
        # noise from signal-free channels; needs to be multiplied by
        # sqrt(number of channels) and the channel width
        channoise = np.nanstd(d['spectrum'][noise_region])
        stacknoise[i] = channoise * np.sqrt(nchan) * np.abs(
            d['spectral_axis'].value[0] - d['spectral_axis'].value[1])
        bin_lower[i] = binedge[i]
        bin_upper[i] = binedge[i + 1]
        bin_mean[i] = (binedge[i] + binedge[i + 1]) / 2
        stacksum[i] = integral.value
        binlabel[i] = d['label']
        if sfrmap is not None:
            sfr_mean[i] = np.nanmean(sfrmap[binmap == binlabel[i]])
            # calculate the area of each bin in units of kpc^2
            bin_area[i] = len(binmap[binmap == binlabel[i]].flatten()) * pix_area

    
    total_stack = {}
    if line == 'CO':  # CO is used to make the stacking bins
        total_stack['spectral_axis'] = stack[0]['spectral_axis'].value
        total_stack['bin_lower'] = bin_lower
        total_stack['bin_upper'] = bin_upper
        total_stack['bin_mean'] = bin_mean
        total_stack['bin_type'] = bintype
        total_stack['bin_unit'] = xunit[bintype]
        if sfrmap is not None:
            total_stack['sfr_mean_w4fuv'] = sfr_mean
            total_stack['bin_area'] = bin_area
    total_stack[line + '_stack_profile'] = stacked_profile
    total_stack[line + '_stack_sum'] = stacksum
    total_stack[line + '_stack_noise'] = stacknoise
    return total_stack
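
A minimal sketch of calling `stack` for CO binned by radius (the cube, basemap,
and SFR map are assumed to exist already):

co_stack = stack('CO', cubeCO, 'ngc2903', 'mom1',
                 basemap=radius_map, basetype='radius', bintype='radius',
                 sfrmap=sfr_map, degasdir='./', datadir='./')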
Example 21
sc1 = SpectralCube(data=cube1, wcs=WCS(header))
mask = LazyMask(np.isfinite, sc1)
sc1 = sc1.with_mask(mask)
# Set the scale for the purposes of the tests
props1 = Moments(sc1, scale=0.003031065017916262 * u.Unit(""))
# props1.make_mask(mask=mask)
props1.make_moments()
props1.make_moment_errors()

dataset1 = props1.to_dict()

moment0_hdu1 = fits.PrimaryHDU(dataset1["moment0"][0],
                               header=dataset1["moment0"][1])

moment0_proj = Projection.from_hdu(moment0_hdu1)

##############################################################################

path2 = os.path.join(turb_path, "data/dataset2.npz")

dataset2 = np.load(path2)

cube2 = np.empty((500, 32, 32))

count = 0
for posn, kept in zip(*dataset2["channels"]):
    posn = int(posn)
    if kept:
        cube2[posn, :, :] = dataset2["cube"][count, :, :]
        count += 1
Example 22
    ncores = 1

    img_view = False

    skip_check = True

    # Load in the dust column density maps to set the allowed
    # spatial region

    filename_coldens = glob(osjoin(data_path, '455pc', "dust.fits"))

    hdu_coldens = fits.open(filename_coldens[0])

    pad_size = 0.5 * u.arcmin

    proj_coldens = Projection.from_hdu(
        fits.PrimaryHDU(hdu_coldens[0].data, hdu_coldens[0].header))

    # Get minimal size
    coldens_mask = np.isfinite(proj_coldens)
    coldens_mask = mo.remove_small_objects(coldens_mask, min_size=12)

    proj_coldens = proj_coldens[nd.find_objects(coldens_mask)[0]]

    # Get spatial extents.
    # NOTE: extrema for 2D objects broken in spectral-cube! Need to fix...
    lat, lon = proj_coldens.spatial_coordinate_map
    lat_min = lat.min() - pad_size
    lat_max = lat.max() + pad_size
    lon_min = lon.min() - pad_size
    lon_max = lon.max() + pad_size
Example 23
from galaxy_params import gal_feath as gal

default_figure()

fig_path = allfigs_path("co_vs_hi")
if not os.path.exists(fig_path):
    os.mkdir(fig_path)

col_pal = sb.color_palette('colorblind')

cosinc = np.cos(gal.inclination.to(u.rad)).value

moment0 = fits.open(fourteenB_wGBT_HI_file_dict["Moment0"])[0]
moment0_wcs = WCS(moment0.header)

mom0_proj = Projection.from_hdu(moment0)

beam = Beam.from_fits_header(moment0.header)

# Convert to K km s and correct for disk inclination.
moment0_Kkm_s = beam.jtok(hi_freq).value * (moment0.data / 1000.) * cosinc
moment0_coldens = moment0_Kkm_s * hi_coldens_Kkms.value

pixscale = np.sqrt(proj_plane_pixel_area(moment0_wcs))

# Use the reprojected version
co_moment0 = fits.open(
    iram_co21_14B088_data_path("m33.co21_iram.14B-088_HI.mom0.fits"))[0]

co_noise_map = fits.open(
    iram_co21_14B088_data_path("m33.rms.14B-088_HI.fits"))[0]
Example 24
                            ).format(linename, sourcename, robust=robust)
                            log.info("Writing {0}".format(linecubepath))
                            cubeK.write(linecubepath, overwrite=True)

                            m0 = cubeK.moment0(axis=0)
                            mx = cubeK.max(axis=0)

                            mx.write(mx_fn, overwrite=True)
                            m0.write(m0_fn, overwrite=True)

                            del cubeK
                            del scube
                        else:

                            # the cubes have to exist anyway...
                            mx = Projection.from_hdu(fits.open(mx_fn)[0])
                            m0 = Projection.from_hdu(fits.open(m0_fn)[0])

                        for (contband, conthdu) in continua.items():
                            print("Figures for {0} in spw {1}".format(
                                linename, spw))
                            pl.figure(1).clf()

                            if hasattr(mx, 'FITSFigure'):
                                del mx.FITSFigure
                            mx.quicklook(filename=paths.fpath(
                                'moments/Orion{1}_{0}_robust{robust}.maskedclarkclean10000_medsub_K_peak.pdf'
                            ).format(linename, sourcename, robust=robust),
                                         aplpy_kwargs={'figure': pl.figure(1)})
                            mx.FITSFigure.show_grayscale(invert=True)
Example 25
import os

from astropy import units as u
from astropy.io import fits
from spectral_cube import SpectralCube, Projection

import paths
import files
import regions

import pylab as pl

regs = regions.read_ds9(paths.rpath('sio_masers.reg'))
v2maser = regs[2]

bluefile = paths.Fpath('SgrB2_N_SiO_blue_20to50kms.fits')
redfile = paths.Fpath('SgrB2_N_SiO_blue_77to100kms.fits')
if os.path.exists(bluefile):
    blue = Projection.from_hdu(fits.open(bluefile))
    red = Projection.from_hdu(fits.open(redfile))
else:

    siocube = (SpectralCube.read(
        '/Volumes/external/sgrb2/full_SgrB2N_spw0_lines_cutoutN_medsub.fits').
               with_spectral_unit(u.km / u.s,
                                  velocity_convention='radio',
                                  rest_value=217.10498 * u.GHz).spectral_slab(
                                      -200 * u.km / u.s, 250 * u.km / u.s))
    siocube.spectral_slab(0 * u.km / u.s, 120 * u.km / u.s).write(
        'SgrB2_N_SiO_medsub_cutout.fits', overwrite=True)

    blue = siocube.spectral_slab(20 * u.km / u.s, 50 * u.km / u.s).moment0()
    blue.write(bluefile, overwrite=True)
Example 26
from astropy import wcs
from astropy.io import fits
from astropy import stats
import paths
import pylab as pl
from scipy.ndimage import map_coordinates
import scipy.signal
import reproject
import radio_beam

from spectral_cube import Projection

from files import b3_hires_cont, b6_hires_cont, b7_hires_cont
from constants import source, extraction_path, origin, central_freqs

# vmap produced by stacked_line_search.py
vmap_name = paths.dpath('disk_velocity_map.fits')
hdu = fits.open(vmap_name)[0]
vmap = Projection.from_hdu(hdu)

b3beam = radio_beam.Beam.from_fits_header(
    fits.getheader(paths.dpath(b3_hires_cont)))

print("per-band continuum measurements in the spectral extraction aperture: ")
for ii, contfn in enumerate((b3_hires_cont, b6_hires_cont, b7_hires_cont)):
    band = contfn[14:16]

    conthdu = fits.open(paths.dpath(contfn))[0]

    ww = wcs.WCS(conthdu.header)

    #vmap_proj,_ = reproject.reproject_interp(vmap.hdu,
    #                                         ww,
    #                                         shape_out=conthdu.data.shape)
print(f"Inputs: {line_name} {spec_width} "
      f"{convolve_to_round_beam} {exclude_asymm} {overwrite}")

# Make sure the line name is valid
if line_name not in spw_setup_2019.keys():
    raise ValueError("Line name is not found in list: {0}".format(spw_setup_2019.keys()))

if spec_width not in imaging_linedict[line_name].keys():
    raise ValueError("Spec setup not found in list defined in line_imaging_params.py: {0}".format(imaging_linedict.keys()))

# Load in the 12CO 30-m moment 1 map to define line-free channels
co_iram_mom1_file = os.path.expanduser("~/bigdata/ekoch/M33/co21/m33.co21_iram.mom1.fits")
# co_iram_mom1_file = os.path.expanduser("~/storage/M33/IRAM/m33.co21_iram.mom1.fits")
co_mom1_hdu = fits.open(co_iram_mom1_file)
co_mom1 = Projection.from_hdu(co_mom1_hdu)


mosaic_line_path = osjoin(mosaic_path, line_name)

if not os.path.exists(mosaic_line_path):
    os.mkdir(mosaic_line_path)

# Grab all pbcor images for that line and spectral width

per_mosaic_line_path = osjoin(data_path, 'per_mosaic_imaging', line_name)

images = glob(osjoin(per_mosaic_line_path,
                     "Brick*_{0}_{1}.image.pbcor".format(line_name, spec_width)))

pbs = glob(osjoin(per_mosaic_line_path,
Example 28
def convolve_image(inimage,
                   newbeam,
                   mode='dataimage',
                   res_tol=0.0,
                   min_coverage=0.8,
                   nan_treatment='fill',
                   boundary='fill',
                   fill_value=0.,
                   append_raw=False,
                   verbose=False,
                   suppress_error=False):
    """
    Convolve a 2D image or an rms noise image to a specified beam.

    This function is similar to `convolve_cube()`, but it deals with
    2D images (i.e., projections) rather than 3D cubes.

    Parameters
    ----------
    inimage : FITS HDU object or ~spectral_cube.Projection object
        Input 2D image
    newbeam : radio_beam.Beam object
        Target beam to convolve to
    mode : {'dataimage', 'noiseimage'}, optional
        Whether the input image is a data image or an rms noise image.
        In the former case, a direct convolution is performed;
        in the latter case, the convolution attempts to mimic the
        error propagation process to the specified lower resolution.
        (Default: 'dataimage')
    res_tol : float, optional
        Tolerance on the difference between input/output resolution
        By default, a convolution is performed on the input image
        when its native resolution is different from (sharper than)
        the target resolution. Use this keyword to specify a tolerance
        on resolution, within which no convolution will be performed.
        For example, res_tol=0.1 will allow a 10% tolerance.
    min_coverage : float or None, optional
        This keyword specifies a minimum beam covering fraction of
        valid pixels for convolution (Default: 0.8).
        Locations with a beam covering fraction less than this value
        will be overwritten to "NaN" in the convolved cube.
        If the user would rather use the ``preserve_nan`` mode in
        `astropy.convolution.convolve_fft`, set this keyword to None.
    nan_treatment: {'interpolate', 'fill'}, optional
        To be passed to `astropy.convolution.convolve_fft`.
        (Default: 'fill')
    boundary: {'fill', 'wrap'}, optional
        To be passed to `astropy.convolution.convolve_fft`.
        (Default: 'fill')
    fill_value : float, optional
        To be passed to `astropy.convolution.convolve_fft`.
        (Default: 0)
    append_raw : bool, optional
        Whether to append the raw convolved image and weight image.
        Default is not to append.
    verbose : bool, optional
        Whether to print the detailed processing log in terminal.
        Default is to not print.
    suppress_error : bool, optional
        Whether to suppress the error message when convolution is
        unsuccessful. Default is to not suppress.

    Returns
    -------
    outimage : FITS HDU object or Projection object
        Convolved 2D image (when append_raw=False), or a 3-tuple
        including a masked version, an unmasked version, and a coverage
        fraction map (when append_raw=True).
        The output will be the same type of object as the input.
    """

    if isinstance(inimage, Projection):
        proj = inimage
    elif isinstance(inimage, (fits.PrimaryHDU, fits.ImageHDU)):
        proj = Projection.from_hdu(inimage)
    else:
        raise ValueError("`inimage` needs to be either a FITS HDU object "
                         "or a spectral_cube.Projection object")

    if (res_tol > 0) and (newbeam.major != newbeam.minor):
        raise ValueError("Cannot handle a non-zero resolution torelance "
                         "when the target beam is not round")

    if min_coverage is None:
        # Skip coverage check and preserve NaN values.
        # This uses the default 'preserve_nan' scheme
        # implemented in 'astropy.convolution.convolve_fft'
        convolve_func = partial(convolve_fft,
                                fill_value=fill_value,
                                nan_treatment=nan_treatment,
                                boundary=boundary,
                                preserve_nan=True,
                                allow_huge=True)
    else:
        # Do coverage check to determine the mask on the output
        convolve_func = partial(convolve_fft,
                                fill_value=fill_value,
                                nan_treatment=nan_treatment,
                                boundary=boundary,
                                allow_huge=True)
    convolve_func_w = partial(convolve_fft,
                              fill_value=0.,
                              boundary='fill',
                              allow_huge=True)

    tol = newbeam.major * np.array([1 - res_tol, 1 + res_tol])
    if ((tol[0] < proj.beam.major < tol[1])
            and (tol[0] < proj.beam.minor < tol[1])):
        if verbose:
            print("Native resolution within tolerance - "
                  "Copying original image...")
        my_append_raw = False
        convproj = wtproj = None
        newproj = proj.copy()
    else:
        if verbose:
            print("Deconvolving beam...")
        try:
            beamdiff = newbeam.deconvolve(proj.beam)
        except ValueError as err:
            if suppress_error:
                if verbose:
                    print("Unsuccessful beam deconvolution: "
                          "{}\nOld: {}\nNew: {}"
                          "".format(err, proj.beam, newbeam))
                    print("Exiting...")
                return
            else:
                raise ValueError("Unsuccessful beam deconvolution: "
                                 "{}\nOld: {}\nNew: {}"
                                 "".format(err, proj.beam, newbeam))
        if verbose:
            print("Convolving image...")
        if mode == 'dataimage':
            # do convolution
            convproj = proj.convolve_to(newbeam, convolve=convolve_func)
            if min_coverage is not None:
                my_append_raw = True
                wtproj = Projection(np.isfinite(proj.data).astype('float'),
                                    wcs=proj.wcs,
                                    beam=proj.beam)
                wtproj = wtproj.convolve_to(newbeam, convolve=convolve_func_w)
                # divide the raw convolved image by the weight image
                # to correct for filling fraction
                newproj = convproj / wtproj.hdu.data
                # mask all pixels w/ weight smaller than min_coverage
                threshold = min_coverage * u.dimensionless_unscaled
                newproj[wtproj < threshold] = np.nan
            else:
                my_append_raw = False
                newproj = convproj
                wtproj = None
        elif mode == 'noiseimage':
            # Empirically derive a noise image at the lower resolution
            # Step 1: square the high resolution noise image
            projsq = proj**2
            # Step 2: convolve the squared noise image with a kernel
            #         that is sqrt(2) times narrower than the one
            #         used for data image convolution (this is because
            #         the Gaussian weight needs to be squared in
            #         error propagation)
            beamdiff_small = Beam(major=beamdiff.major / np.sqrt(2),
                                  minor=beamdiff.minor / np.sqrt(2),
                                  pa=beamdiff.pa)
            newbeam_small = proj.beam.convolve(beamdiff_small)
            convprojsq = projsq.convolve_to(newbeam_small,
                                            convolve=convolve_func)
            if min_coverage is not None:
                my_append_raw = True
                wtproj = Projection(np.isfinite(proj.data).astype('float'),
                                    wcs=proj.wcs,
                                    beam=proj.beam)
                # divide the raw convolved image by the weight image
                # to correct for filling fraction
                wtproj_d = wtproj.convolve_to(newbeam_small,
                                              convolve=convolve_func_w)
                newprojsq = convprojsq / wtproj_d.hdu.data
                # mask all pixels w/ weight smaller than min_coverage
                # (here I force the masking of the noise image to be
                #  consistent with that of the data image)
                wtproj = wtproj.convolve_to(newbeam, convolve=convolve_func_w)
                threshold = min_coverage * u.dimensionless_unscaled
                newprojsq[wtproj < threshold] = np.nan
            else:
                my_append_raw = False
                newprojsq = convprojsq
                wtproj = None
            # Step 3: find the sqrt of the convolved noise image
            convproj = np.sqrt(convprojsq)
            newproj = np.sqrt(newprojsq)
            # Step 4: apply a multiplicative factor, which accounts
            #         for the decrease in rms noise due to averaging
            convproj = (convproj *
                        np.sqrt(proj.beam.sr / newbeam.sr).to('').value)
            newproj = (newproj *
                       np.sqrt(proj.beam.sr / newbeam.sr).to('').value)
        else:
            raise ValueError("Invalid `mode` value: {}".format(mode))

    if isinstance(inimage, Projection):
        if append_raw and my_append_raw:
            return newproj, convproj, wtproj
        else:
            return newproj
    elif isinstance(inimage, (fits.PrimaryHDU, fits.ImageHDU)):
        if append_raw and my_append_raw:
            return newproj.hdu, convproj.hdu, wtproj.hdu
        else:
            return newproj.hdu
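
A minimal usage sketch (the input file is hypothetical and must carry beam
information in its header, since `Projection.from_hdu` and the tolerance check
both rely on `proj.beam`):

import astropy.units as u
from astropy.io import fits
from radio_beam import Beam

hdu = fits.open('mymap.fits')[0]  # hypothetical 2D image with beam keywords
target = Beam(major=15 * u.arcsec, minor=15 * u.arcsec, pa=0 * u.deg)
conv_hdu = convolve_image(hdu, target, mode='dataimage', verbose=True)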
Example 29
def test_casafeather(image_sz512as_pl1p5_fwhm2as_scale1as, sdfactor,
                     lowpassfilterSD):

    tmp_path, input_fn, intf_fn, sd_fn = image_sz512as_pl1p5_fwhm2as_scale1as

    intf_hdu = fits.open(intf_fn)[0]
    sd_hdu = fits.open(sd_fn)[0]

    # Grab the rest frequency set in the header
    restfreq = (intf_hdu.header['RESTFRQ'] * units.Hz).to(units.GHz)

    # Feathering with CASA

    intf_fn_image = intf_fn.parent / intf_fn.name.replace(".fits", ".image")
    sd_fn_image = sd_fn.parent / sd_fn.name.replace(".fits", ".image")

    # CASA needs a posix string to work
    importfits(fitsimage=intf_fn.as_posix(),
               imagename=intf_fn_image.as_posix(),
               defaultaxes=True,
               defaultaxesvalues=['', '', f'{restfreq.value}GHz', 'I'])
    importfits(fitsimage=sd_fn.as_posix(),
               imagename=sd_fn_image.as_posix(),
               defaultaxes=True,
               defaultaxesvalues=['', '', f'{restfreq.value}GHz', 'I'])

    output_name = tmp_path / 'casafeathered.image'

    feather(
        imagename=output_name.as_posix(),
        highres=intf_fn_image.as_posix(),
        lowres=sd_fn_image.as_posix(),
        sdfactor=sdfactor,
        lowpassfiltersd=lowpassfilterSD,
    )

    # Right now read as spectralcube despite being a 2D image.
    casa_feather_proj = SpectralCube.read(output_name)[0]

    # Feathering with uvcombine
    feathered_hdu = feather_simple(hires=intf_hdu,
                                   lores=sd_hdu,
                                   lowresscalefactor=sdfactor,
                                   lowpassfilterSD=lowpassfilterSD,
                                   deconvSD=False,
                                   return_hdu=True)

    uvcomb_feather_proj = Projection.from_hdu(feathered_hdu)

    diff = (casa_feather_proj - uvcomb_feather_proj).value

    # By-hand checks. Keep so we remember.
    # print("Proof that we have exactly reimplemented CASA's feather: ")
    # print("((casa-uvcombine)**2 / casa**2).sum() = {0}"
    #       .format(((diff**2)/(casa_feather_proj.value**2)).sum()))
    # print("Maximum of abs(diff): {0}".format(np.abs(diff).max()))

    # Check for agreement within 0.05%
    if lowpassfilterSD:
        # assert np.abs(diff / casa_feather_proj.value).max() < 2e-4
        assert np.abs(np.median(diff / casa_feather_proj.value)) < 5e-4
    else:
        # assert np.abs(diff / casa_feather_proj.value).max() < 1e-7
        assert np.abs(np.median(diff / casa_feather_proj.value)) < 5e-4
Example 30
    restfreq = 1.4 * u.GHz
    bunit = u.K

    plaw_hdu = create_fits_hdu(rnoise_img, pixel_scale, beamfwhm, imshape,
                               restfreq, bunit)

    pspec = PowerSpectrum(plaw_hdu)
    pspec.run(verbose=True,
              radial_pspec_kwargs={'binsize': 1.0},
              fit_kwargs={'weighted_fit': False},
              fit_2D=False,
              low_cut=1. / (60 * u.pix),
              save_name=osjoin(fig_path, "rednoise_pspec_slope3.png"))

    pencil_beam = Beam(0 * u.deg)
    plaw_proj = Projection.from_hdu(plaw_hdu)
    plaw_proj = plaw_proj.with_beam(pencil_beam)

    new_beam = Beam(3 * plaw_hdu.header['CDELT2'] * u.deg)
    plaw_conv = plaw_proj.convolve_to(new_beam)

    plaw_conv.quicklook()
    plt.savefig('images/rednoise_slope3_img_smoothed.png')
    plt.close()

    pspec2 = PowerSpectrum(plaw_conv)
    pspec2.run(verbose=True,
               xunit=u.pix**-1,
               fit_2D=False,
               low_cut=0.025 / u.pix,
               high_cut=0.1 / u.pix)
Example 31
    for res_type in res_types:

        print("Resolution {}".format(res_type))

        if res_type == 'orig':
            filename = "{0}_{1}_mjysr_cutout.fits".format(gal.lower(), name)
        else:
            filename = "{0}_{1}_{2}_mjysr_cutout.fits".format(gal.lower(), name, res_type)

        if not os.path.exists(osjoin(data_path, gal, filename)):
            print("Could not find {}. Skipping".format(filename))
            continue

        hdu = fits.open(osjoin(data_path, gal, filename))
        proj = Projection.from_hdu(fits.PrimaryHDU(hdu[0].data.squeeze(),
                                                   hdu[0].header))
        # Attach equiv Gaussian beam
        # if res_type == 'orig':
        #     proj = proj.with_beam(names[name])

        # With and without 30 Dor
        for slice_name in lmc_mips24_slice:

            slicer = lmc_mips24_slice[slice_name]

            if res_type == 'orig':
                save_name = "{0}_{1}_{2}_mjysr.pspec.pkl".format(gal.lower(),
                                                                 name, slice_name)
            else:
                save_name = "{0}_{1}_{2}_{3}_mjysr.pspec.pkl".format(gal.lower(),
                                                                     name,
Example 32
        filename = osjoin(data_path, gal, fitinfo_dict[gal]['filename'])

        hdu_coldens = fits.open(filename)

        # The edges of the maps have high uncertainty. For M31, this may be altering
        # the shape of the power-spectrum. Try removing these edges:
        coldens_mask = np.isfinite(hdu_coldens[0].data[0].squeeze())

        coldens_mask = nd.binary_erosion(coldens_mask,
                                         structure=np.ones((3, 3)),
                                         iterations=8)

        # Get minimal size
        masked_data = hdu_coldens[0].data[0].squeeze()
        masked_data[~coldens_mask] = np.NaN
        proj_coldens = Projection.from_hdu(
            fits.PrimaryHDU(masked_data, hdu_coldens[0].header))

        proj_coldens = proj_coldens[nd.find_objects(coldens_mask)[0]]

        proj_coldens = proj_coldens.with_beam(fitinfo_dict[gal]['beam'])

        # Look at the uncertainty map
        masked_errs = hdu_coldens[0].data[2].squeeze()
        masked_errs[~coldens_mask] = np.NaN
        proj_coldens_err = Projection.from_hdu(
            fits.PrimaryHDU(masked_errs, hdu_coldens[0].header))

        proj_coldens_err = proj_coldens_err[nd.find_objects(coldens_mask)[0]]

        proj_coldens_err = proj_coldens_err.with_beam(
            fitinfo_dict[gal]['beam'])
Example 33
    for gal in gals:

        dist = gals[gal]

        # Load in the dust column density maps to set the allowed
        # spatial region

        filename_coldens = glob(
            osjoin(data_path, gal, "*dust.surface.density*.fits"))

        hdu_coldens = fits.open(filename_coldens[0])

        pad_size = 0.5 * u.arcmin

        proj_coldens = Projection.from_hdu(
            fits.PrimaryHDU(hdu_coldens[0].data[0].squeeze(),
                            hdu_coldens[0].header))

        # Get minimal size
        proj_coldens = proj_coldens[nd.find_objects(
            np.isfinite(proj_coldens))[0]]

        # Get spatial extents.
        # NOTE: extrema for 2D objects broken in spectral-cube! Need to fix...
        lat, lon = proj_coldens.spatial_coordinate_map
        lat_min = lat.min() - pad_size
        lat_max = lat.max() + pad_size
        lon_min = lon.min() - pad_size
        lon_max = lon.max() + pad_size

        def spat_mask_maker(lat_map, lon_map):
Example 34
import os

from astropy import units as u
from astropy.io import fits
from spectral_cube import SpectralCube, Projection

import paths
import files
import regions

import pylab as pl

regs = regions.read_ds9(paths.rpath('sio_masers.reg'))
v2maser = regs[2]

cont_b6 = fits.open(files.contb6_rm2)
cont_b3 = fits.open(files.contb3_rm2)

siov2j2im = paths.Fpath('SiO_v2_j2_peak_N.fits')
if os.path.exists(siov2j2im):
    siov2j2 = Projection.from_hdu(fits.open(siov2j2im))
else:
    region, spw, band, freq = 'N', 0, 3, 85.640452 * u.GHz
    fn = paths.eFpath(
        'sgr_b2m.{0}.spw{1}.B{2}.lines.clarkclean1000.robust0.5.image.pbcor.medsub.fits'
        .format(region, spw, band))
    siov2j2 = (SpectralCube.read(fn).with_spectral_unit(
        u.km / u.s, velocity_convention='radio',
        rest_value=freq).spectral_slab(55 * u.km / u.s,
                                       110 * u.km / u.s).to(u.K)).max(axis=0)
    siov2j2.write(siov2j2im)

hcnv3j1im = paths.Fpath('HCN_v3_j1_peak_N.fits')
if os.path.exists(hcnv3j1im):
    hcnv3j1 = Projection.from_hdu(fits.open(hcnv3j1im))
else: