Example #1
def deconvolve_moments(moments, beam, pa, pixscale):
    if moments['rmsx_ex'] >= moments['rmsy_ex']:
        beam_ex = Beam(major=moments['rmsx_ex'] * pixscale * sig2fwhm,
                       minor=moments['rmsy_ex'] * pixscale * sig2fwhm,
                       pa=pa * u.rad)
    else:
        beam_ex = Beam(major=moments['rmsy_ex'] * pixscale * sig2fwhm,
                       minor=moments['rmsx_ex'] * pixscale * sig2fwhm,
                       pa=(pa + np.pi / 2) * u.rad)
    beam_ex_dc = beam_ex.deconvolve(beam, failure_returns_pointlike=True)

    if moments['rmsx_noex'] >= moments['rmsy_noex']:
        beam_noex = Beam(major=moments['rmsx_noex'] * pixscale * sig2fwhm,
                         minor=moments['rmsy_noex'] * pixscale * sig2fwhm,
                         pa=pa * u.rad)
    else:
        beam_noex = Beam(major=moments['rmsy_noex'] * pixscale * sig2fwhm,
                         minor=moments['rmsx_noex'] * pixscale * sig2fwhm,
                         pa=(pa + np.pi / 2) * u.rad)
    beam_noex_dc = beam_noex.deconvolve(beam, failure_returns_pointlike=True)

    outdict = {}
    outdict['rmsx_ex'] = (beam_ex_dc.major / pixscale / sig2fwhm).to(
        u.dimensionless_unscaled).value
    outdict['rmsy_ex'] = (beam_ex_dc.minor / pixscale / sig2fwhm).to(
        u.dimensionless_unscaled).value
    outdict['pa_ex'] = (beam_ex_dc.pa).to(u.rad).value
    # use the deconvolved beam (beam_noex_dc) for the '_noex' outputs as well
    outdict['rmsx_noex'] = (beam_noex_dc.major / pixscale / sig2fwhm).to(
        u.dimensionless_unscaled).value
    outdict['rmsy_noex'] = (beam_noex_dc.minor / pixscale / sig2fwhm).to(
        u.dimensionless_unscaled).value
    outdict['pa_noex'] = (beam_noex_dc.pa).to(u.rad).value
    return outdict
Example #2
def main(args):
    hdul = fits.open(args.file_in, 'readonly')
    raw_img = np.squeeze(hdul[0].data)
    img_wcs = wcs.WCS(hdul[0].header)
    pixscale = wcs.utils.proj_plane_pixel_area(img_wcs.celestial)**0.5 * u.deg

    raw_beam = Beam(args.beam * u.arcmin)
    new_beam = Beam(args.cbeam * u.arcmin)
    cov_beam = new_beam.deconvolve(raw_beam)
    cov_kernel = cov_beam.as_kernel(pixscale)

    new_img = convolution.convolve_fft(raw_img,
                                       cov_kernel,
                                       normalize_kernel=True,
                                       allow_huge=True)
    print(new_img.shape)

    new_data = np.reshape(new_img, hdul[0].data.shape)

    print(new_data.shape)

    new_hdu = fits.PrimaryHDU(new_data)
    new_hdu.header = hdul[0].header.copy()
    new_hdu.header['NAXIS'] = 4
    new_hdu.writeto(args.fileout, overwrite=True)

    return 0
Example #3
def test_ldo_attach_beam(LDO, data):

    exp_beam = Beam(1.0 * u.arcsec)
    newbeam = Beam(2.0 * u.arcsec)

    p = LDO(data, copy=False, beam=exp_beam)

    new_p = p.with_beam(newbeam)

    assert p.beam == exp_beam
    assert p.meta['beam'] == exp_beam

    assert new_p.beam == newbeam
    assert new_p.meta['beam'] == newbeam
Example #4
def fixJialu(image, beam=15.0):
    '''
    Purpose: fix up Jialu's IC0342 cube so we can use it for analysis for DEGAS
    '''

    #12CO rest frequency
    rest_freq_12co = 115.27120180 * u.GHz

    f = fits.open(image)

    f[0].header['BUNIT'] = 'K'
    f[0].header['RESTFRQ'] = float(rest_freq_12co.to(u.Hz).value)

    f.writeto(image.replace('.fits', '_fixed.fits'), overwrite=True)

    image = image.replace('.fits', '_fixed.fits')

    # open image
    cube = SpectralCube.read(image)

    cube_kms = cube.with_spectral_unit(u.km / u.s)

    ## This should set header
    #cube_ms = cube.with_spectral_unit(u.m / u.s, rest_value=rest_freq_12co)

    # chop off bad edge
    subcube = cube_kms.subcube(xlo=0, xhi=134, ylo=0, yhi=150)

    # add beam
    beamcube = subcube.with_beam(Beam(8.0 * u.arcsec))

    # smooth
    newBeam = Beam(beam * u.arcsec)
    smoothCube = beamcube.convolve_to(newBeam)

    # smooth spectrally.
    smoothFactor = 3.0
    spSmoothCube = smoothCube.spectral_smooth(Box1DKernel(smoothFactor))
    spec_axis = spSmoothCube.spectral_axis
    chan_width = spec_axis[1] - spec_axis[0]  # channels are equally spaced in velocity
    new_axis = np.arange(spec_axis[0].value, spec_axis[-1].value,
                         smoothFactor * chan_width.value) * u.km / u.s

    interpCube = spSmoothCube.spectral_interpolate(
        new_axis, suppress_smooth_warning=False)

    # write out
    interpCube.write(image.replace('8arcsec_fixed.fits', '10kms_gauss15.fits'),
                     overwrite=True)
Example #5
def test_projection_attach_beam():

    exp_beam = Beam(1.0 * u.arcsec)
    newbeam = Beam(2.0 * u.arcsec)

    proj, hdu = load_projection("55.fits")

    new_proj = proj.with_beam(newbeam)

    assert proj.beam == exp_beam
    assert proj.meta['beam'] == exp_beam

    assert new_proj.beam == newbeam
    assert new_proj.meta['beam'] == newbeam
Example #6
def get_spatialsmooth(cube, res_t):
    """Spatailly smooth data

    Parameters
    ----------
    cube : spectral cube object
    res_t : float
        target *resolusion* (in arcsec)
        *not* channel width - do get_spectralregrid

    Returns
    -------
    smcube = Output cube containing the spatailly smoothed data and updated header
    """

    res_c = cube.header['BMAJ'] * 3600

    print("[INFO] Current resolution of %0.1f arcsec" % (res_c))
    print("[INFO] Target resolution of %0.1f arcsec" % (res_t))

    res_t_ = Beam(major=res_t * au.arcsec,
                  minor=res_t * au.arcsec,
                  pa=0 * au.deg)
    smcube = cube.convolve_to(res_t_)

    return smcube
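
A minimal usage sketch for get_spatialsmooth, assuming the cube already carries beam information in its header; the filenames are placeholders, not from the original source:

# Hypothetical usage: smooth a cube to a 30 arcsec target resolution.
from spectral_cube import SpectralCube

cube = SpectralCube.read('my_cube.fits')          # placeholder filename
smcube = get_spatialsmooth(cube, res_t=30.0)      # target resolution in arcsec
smcube.write('my_cube_30as.fits', overwrite=True)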
Example #7
def conv_model(model_image, clean_beam):
    if isinstance(model_image, BaseSpectralCube):
        model = model_image
    else:
        model = SpectralCube.read(model_image, format='casa_image')
    beam = clean_beam

    pix_scale = model.header['CDELT2'] * u.deg
    pix_scale = pix_scale.to(u.arcsec)
    clean_beam_kernel = beam.as_kernel(pix_scale)

    omega_beam = beam.sr
    omega_pix = pix_scale.to('rad')**2
    npix_beam = (omega_beam / omega_pix).value

    # should we just use a delta function rather than try to hack correct pixel area?
    # alternately, we could deconvolve a pixel size.
    # What is technically correct?
    # What does CASA do?  (scary question)
    fwhm_gauss_pix = (4 * np.log(2) / np.pi)**0.5 * pix_scale
    pix_beam = Beam(fwhm_gauss_pix, fwhm_gauss_pix, 0 * u.deg)
    model = model.with_beam(pix_beam)

    conv = model.convolve_to(beam) * npix_beam

    return conv
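
The fwhm_gauss_pix line above picks the FWHM for which a circular Gaussian beam has the same solid angle as one square pixel: setting Omega = pi * theta_FWHM**2 / (4 ln 2) equal to the pixel area Delta**2 gives theta_FWHM = Delta * sqrt(4 ln 2 / pi). A quick numerical check of that identity (a sketch, not part of the original function):

import numpy as np
from astropy import units as u
from radio_beam import Beam

pix = 1.0 * u.arcsec
fwhm = (4 * np.log(2) / np.pi)**0.5 * pix
pix_beam = Beam(fwhm, fwhm, 0 * u.deg)
# both values are ~1 arcsec^2: the beam solid angle matches one pixel's area
print(pix_beam.sr.to(u.arcsec**2), (pix**2).to(u.arcsec**2))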
Example #8
def fixExtraHERAfromAdam(fitsimage, beam=15.0):
    '''
    Purpose: fix up extra HERA data from Adam
    '''

    # switch header from M/S to m/s to fix up wcs read errors with SpectralCube
    f = fits.open(fitsimage)
    f[0].header['CUNIT3'] = 'm/s'
    newimage = fitsimage.replace('.fits', '_fixed.fits')
    f.writeto(newimage, overwrite=True)
    f.close()

    # open image
    cube = SpectralCube.read(newimage)

    # switch to km/s
    cube_kms = cube.with_spectral_unit(u.km / u.s)

    # smooth
    newBeam = Beam(beam * u.arcsec)
    smoothCube = cube_kms.convolve_to(newBeam)

    # write out
    smoothCube.write(newimage.replace('.fits', '_10kms_gauss15.fits'),
                     overwrite=True)
Example #9
    def identify_bad_beams(self,
                           threshold,
                           reference_beam=None,
                           criteria=['sr', 'major', 'minor'],
                           mid_value=np.nanmedian):
        """
        Mask out any layers in the cube that have beams that differ from the
        central value of the beam by more than the specified threshold.

        Parameters
        ----------
        threshold : float
            Fractional threshold
        reference_beam : Beam
            A beam to use as the reference.  If unspecified, ``mid_value`` will
            be used to select a middle beam
        criteria : list
            A list of criteria to compare.  Can include
            'sr','major','minor','pa' or any subset of those.
        mid_value : function
            The function used to determine the 'mid' value to compare to.  This
            will identify the middle-valued beam area/major/minor/pa.

        Returns
        -------
        includemask : np.array
            A boolean array where ``True`` indicates the good beams
        """

        includemask = np.ones(self.unmasked_beams.size, dtype='bool')

        all_criteria = {'sr', 'major', 'minor', 'pa'}
        if not set.issubset(set(criteria), set(all_criteria)):
            raise ValueError("Criteria must be one of the allowed options: "
                             "{0}".format(all_criteria))

        props = {
            prop:
            u.Quantity([getattr(beam, prop) for beam in self.unmasked_beams])
            for prop in all_criteria
        }

        if reference_beam is None:
            reference_beam = Beam(major=mid_value(props['major']),
                                  minor=mid_value(props['minor']),
                                  pa=mid_value(props['pa']))

        for prop in criteria:
            val = props[prop]
            mid = getattr(reference_beam, prop)

            diff = np.abs((val - mid) / mid)

            assert diff.shape == includemask.shape

            includemask[diff > threshold] = False

        return includemask
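
A hedged usage sketch for the method above, assuming it is the identify_bad_beams method on radio_beam's Beams class; the beam sizes and threshold are illustrative only:

# Sketch: flag beams that deviate from the median by more than 20%.
from astropy import units as u
from radio_beam import Beams

beams = Beams(major=[1.0, 1.05, 3.0] * u.arcsec,
              minor=[1.0, 1.05, 3.0] * u.arcsec,
              pa=[0.0, 0.0, 0.0] * u.deg)
goodbeams = beams.identify_bad_beams(threshold=0.2)
print(goodbeams)  # expect [True, True, False]: the 3" beam is the outlier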
Example #10
def test_single_gray_hole():
    one_gray_hole = add_holes((200, 200), hole_level=100, nholes=1)
    test_gray_hole = Projection(one_gray_hole, wcs=wcs.WCS())

    test_bubble = BubbleFinder2D(test_gray_hole,
                                 beam=Beam(10),
                                 channel=0,
                                 sigma=40)
    test_bubble.multiscale_bubblefind(edge_find=False)
Example #11
def fixNRO(fitsimage):
    '''
    fix up NRO data
    '''

    cube = SpectralCube.read(fitsimage)
    # add beam
    beam_cube = cube.with_beam(Beam(15.0 * u.arcsec))
    beam_cube.write(fitsimage.replace('.FITS', '_fixed.fits'), overwrite=True)
Example #12
def convolve_sky_byfactor(cube,
                          factor,
                          savename=None,
                          edgetrim_width=5,
                          downsample=True,
                          **kwargs):

    factor = factor * 1.0

    if not isinstance(cube, SpectralCube):
        cube = SpectralCube.read(cube)

    if edgetrim_width is not None:
        cube = edge_trim(cube, trim_width=edgetrim_width)

    hdr = cube.header

    # sanity check
    if hdr['CUNIT1'] != hdr['CUNIT2']:
        print "[ERROR]: the axis units for the do not match each other!"
        return None

    beamunit = getattr(u, hdr['CUNIT1'])
    bmaj = hdr['BMAJ'] * beamunit * factor
    bmin = hdr['BMIN'] * beamunit * factor
    pa = hdr['BPA']

    beam = Beam(major=bmaj, minor=bmin, pa=pa)

    # convolve
    cnv_cube = convolve_sky(cube, beam, **kwargs)

    if cnv_cube.fill_value is not np.nan:
        cnv_cube = cnv_cube.with_fill_value(np.nan)
    #cnv_cube = cnv_cube.with_fill_value(0.0)

    if downsample:
        # regrid the convolved cube
        nhdr = FITS_tools.downsample.downsample_header(hdr,
                                                       factor=factor,
                                                       axis=1)
        nhdr = FITS_tools.downsample.downsample_header(nhdr,
                                                       factor=factor,
                                                       axis=2)
        nhdr['NAXIS1'] = int(np.rint(hdr['NAXIS1'] / factor))
        nhdr['NAXIS2'] = int(np.rint(hdr['NAXIS2'] / factor))
        newcube = cnv_cube.reproject(nhdr, order='bilinear')
        # newcube = cnv_cube.reproject(nhdr, order='bicubic')
    else:
        newcube = cnv_cube

    if savename is not None:
        newcube.write(savename, overwrite=True)

    return newcube
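
A minimal call sketch for convolve_sky_byfactor; the filenames are placeholders and the function still needs the convolve_sky / edge_trim helpers and FITS_tools from its original module:

# Hypothetical call: convolve to twice the native beam and downsample the
# pixel grid by the same factor.  Filenames are placeholders.
cnv_cube = convolve_sky_byfactor('ngc1234_cube.fits', factor=2,
                                 savename='ngc1234_cube_conv2x.fits',
                                 downsample=True)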
Example #13
def test_projection_with_beam(data_55):

    exp_beam = Beam(1.0 * u.arcsec)

    proj, hdu = load_projection(data_55)

    # uses from_hdu, which passes beam as kwarg
    assert proj.beam == exp_beam
    assert proj.meta['beam'] == exp_beam

    # load beam from meta
    exp_beam = Beam(1.5 * u.arcsec)

    meta = {"beam": exp_beam}
    new_proj = Projection(hdu.data, wcs=proj.wcs, meta=meta)

    assert new_proj.beam == exp_beam
    assert new_proj.meta['beam'] == exp_beam

    # load beam from given header
    exp_beam = Beam(2.0 * u.arcsec)
    header = hdu.header.copy()
    header = exp_beam.attach_to_header(header)
    new_proj = Projection(hdu.data,
                          wcs=proj.wcs,
                          header=header,
                          read_beam=True)

    assert new_proj.beam == exp_beam
    assert new_proj.meta['beam'] == exp_beam

    # load beam from beam object
    exp_beam = Beam(3.0 * u.arcsec)
    header = hdu.header.copy()
    del header["BMAJ"], header["BMIN"], header["BPA"]
    new_proj = Projection(hdu.data, wcs=proj.wcs, header=header, beam=exp_beam)

    assert new_proj.beam == exp_beam
    assert new_proj.meta['beam'] == exp_beam

    # Slice the projection with a beam and check it's still there
    assert new_proj[:1, :1].beam == exp_beam
Example #14
def test_nocelestial_convolution_2D_fail(data_255_delta, use_dask):

    cube, data = cube_and_raw(data_255_delta, use_dask=use_dask)

    proj = cube.moment0(axis=1)

    test_beam = Beam(1.0 * u.arcsec)

    with pytest.raises(WCSCelestialError,
                       match="WCS does not contain two spatial axes."):
        proj.convolve_to(test_beam)
Example #15
def main(pool, args, verbose=False):
    """Main script
    """

    # Fix up outdir
    outdir = args.outdir
    if outdir is not None:
        if outdir[-1] == '/':
            outdir = outdir[:-1]
    else:
        outdir = '.'

    # Get file list
    files = glob(args.infile)
    if files == []:
        raise Exception('No files found!')

    # Parse args
    bmaj = args.bmaj
    bmin = args.bmin
    bpa = args.bpa

    # Find largest bmax
    big_beam = getmaxbeam(files, verbose=verbose)

    # Set to largest
    if bpa is None and bmin is None and bmaj is None:
        bpa = big_beam.pa.to(u.deg)
    else:
        bpa = 0 * u.deg
    if bmaj is None:
        bmaj = round_up(big_beam.major.to(u.arcsec))
    elif bmaj * u.arcsec < round_up(big_beam.major.to(u.arcsec)):
        raise Exception('Selected BMAJ is too small!')
    else:
        bmaj *= u.arcsec
    if bmin is None:
        bmin = round_up(big_beam.minor.to(u.arcsec))
    elif bmin * u.arcsec < round_up(big_beam.minor.to(u.arcsec)):
        raise Exception('Selected BMIN is too small!')
    else:
        bmin *= u.arcsec

    new_beam = Beam(bmaj, bmin, bpa)
    if verbose:
        print(f'Final beam is {new_beam}')

    inputs = [[file, outdir, new_beam, args, verbose] for file in files]

    output = list(pool.map(worker, inputs))

    if verbose:
        print('Done!')
Example #16
def test_nocelestial_convolution_2D_fail():

    cube, data = cube_and_raw('255_delta.fits')

    proj = cube.moment0(axis=1)

    test_beam = Beam(1.0 * u.arcsec)

    with pytest.raises(WCSCelestialError) as exc:
        proj.convolve_to(test_beam)

    assert exc.value.args[0] == ("WCS does not contain two spatial axes.")
Example #17
def test_ondespectrum_with_beam():

    exp_beam = Beam(1.0 * u.arcsec)

    test_wcs_1 = WCS(naxis=1)
    spec = OneDSpectrum(twelve_qty_1d, wcs=test_wcs_1)

    # load beam from meta
    meta = {"beam": exp_beam}
    new_spec = OneDSpectrum(spec.data, wcs=spec.wcs, meta=meta)

    assert new_spec.beam == exp_beam
    assert new_spec.meta['beam'] == exp_beam

    # load beam from given header
    hdu = spec.hdu
    exp_beam = Beam(2.0 * u.arcsec)
    header = hdu.header.copy()
    header = exp_beam.attach_to_header(header)
    new_spec = OneDSpectrum(hdu.data,
                            wcs=spec.wcs,
                            header=header,
                            read_beam=True)

    assert new_spec.beam == exp_beam
    assert new_spec.meta['beam'] == exp_beam

    # load beam from beam object
    exp_beam = Beam(3.0 * u.arcsec)
    header = hdu.header.copy()
    new_spec = OneDSpectrum(hdu.data,
                            wcs=spec.wcs,
                            header=header,
                            beam=exp_beam)

    assert new_spec.beam == exp_beam
    assert new_spec.meta['beam'] == exp_beam

    # Slice the spectrum with a beam and check it's still there
    assert new_spec[:1].beam == exp_beam
Example #18
def image_sz512as_pl1p5_fwhm2as_scale1as(tmp_path):

    pixel_scale = 1 * units.arcsec
    restfreq = 100 * units.GHz

    highres_major = 2 * units.arcsec

    # Generate input image
    input_hdu = generate_test_fits(imsize=512, powerlaw=1.5,
                                beamfwhm=highres_major,
                                pixel_scale=pixel_scale,
                                restfreq=restfreq,
                                brightness_unit=units.Jy / units.sr)

    input_fn = tmp_path / "input_image_sz512as_pl1.5_fwhm2as_scale1as.fits"
    input_hdu.writeto(input_fn, overwrite=True)

    input_proj = Projection.from_hdu(input_hdu).to(units.Jy / units.beam)

    # Make Interferometric image
    intf_data = interferometrically_observe_image(image=input_hdu.data,
                                                pixel_scale=pixel_scale,
                                                largest_angular_scale=40*units.arcsec,
                                                smallest_angular_scale=highres_major)[0].real
    intf_hdu = fits.PrimaryHDU(data=intf_data.value if hasattr(intf_data, "value") else intf_data,
                                header=input_hdu.header)
    intf_proj = Projection.from_hdu(intf_hdu).to(units.Jy / units.beam)
    intf_fn = tmp_path / "input_image_sz512as_pl1.5_fwhm2as_scale1as_intf2to40as.fits"
    intf_proj.write(intf_fn, overwrite=True)

    # Make SD image
    sd_header = input_hdu.header.copy()

    major = 15*units.arcsec
    # Eff SD diam (to compare with CASA in troubleshooting)

    sd_beam = Beam(major=major)
    sd_header.update(sd_beam.to_header_keywords())

    sd_fn = tmp_path / "input_image_sz512as_pl1.5_fwhm2as_scale1as_sd15as.fits"
    sd_data = singledish_observe_image(input_hdu.data,
                                    pixel_scale=pixel_scale,
                                    beam=sd_beam,
                                    boundary='wrap')

    sd_hdu = fits.PrimaryHDU(data=sd_data.value if hasattr(sd_data, "value") else sd_data,
                            header=sd_header)
    sd_hdu.header.update(sd_beam.to_header_keywords())
    sd_proj = Projection.from_hdu(sd_hdu).to(units.Jy / units.beam)
    sd_proj.write(sd_fn, overwrite=True)

    return tmp_path, input_fn, intf_fn, sd_fn
Example #19
def test_pspec(plotname="pspec_rnoise_beamsmooth_apodizetukey.pdf",
               size=256,
               powerlaw=3.,
               run_kwargs={
                   'verbose': False,
                   'apodize_kernel': 'tukey'
               },
               plot_kwargs={'fit_color': 'black'},
               beam_smooth=True,
               pixel_scale=2 * u.arcsec,
               bmin=8.09 * u.arcsec,
               bmaj=10.01 * u.arcsec,
               bpa=-12.9 * u.deg,
               restfreq=1.4 * u.GHz,
               bunit=u.K):
    from spectral_cube import Projection
    from radio_beam import Beam

    rnoise_img = make_extended(size, powerlaw)
    # Create a FITS HDU
    rnoise_hdu = create_fits_hdu(rnoise_img, 2 * u.arcsec, 2 * u.arcsec,
                                 rnoise_img.shape, 1.4 * u.GHz, u.K)

    pspec = PowerSpectrum(rnoise_hdu)

    if beam_smooth:
        pencil_beam = Beam(0 * u.deg)
        rnoise_proj = Projection.from_hdu(rnoise_hdu).with_beam(pencil_beam)
        new_beam = Beam(bmaj, bmin, bpa)
        rnoise_conv = rnoise_proj.convolve_to(new_beam)

        # hdr = fits.Header(header)
        # rnoise_hdu = fits.PrimaryHDU(rnoise_img, header=hdr)
        pspec = PowerSpectrum(rnoise_conv)

    pspec.run(**run_kwargs)
    pspec.plot_fit(save_name=plotname, **plot_kwargs)

    return pspec
Example #20
def test_gauss_hole():
    one_gauss_hole, params = add_gaussian_holes(np.ones((200, 200)),
                                                nholes=1,
                                                return_info=True)
    test_gauss_hole = Projection(one_gauss_hole, wcs=wcs.WCS())

    test_bubble = BubbleFinder2D(test_gauss_hole,
                                 beam=Beam(10),
                                 channel=0,
                                 sigma=0.05)
    test_bubble.multiscale_bubblefind(edge_find=True)

    test_bubble.visualize_regions(edges=True)
Example #21
def smallest_beam(beams, includemask=None):
    """
    Returns the smallest beam (by area) in a list of beams.
    """

    from radio_beam import Beam

    major, minor, pa = beam_props(beams, includemask)
    smallest_idx = (major * minor).argmin()
    new_beam = Beam(major=major[smallest_idx], minor=minor[smallest_idx],
                    pa=pa[smallest_idx])

    return new_beam
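
For comparison, radio_beam's Beams class offers the same selection directly; a sketch, assuming the smallest_beam method is available in your radio_beam version:

# Sketch: smallest beam by area straight from a Beams object.
from astropy import units as u
from radio_beam import Beams

beams = Beams(major=[2.0, 1.5, 1.8] * u.arcsec,
              minor=[2.0, 1.5, 1.8] * u.arcsec,
              pa=[0.0, 0.0, 0.0] * u.deg)
print(beams.smallest_beam())  # expect the 1.5 arcsec beam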
Example #22
def test_beams_convolution_equal():
    cube, data = cube_and_raw('522_delta_beams.fits')

    # Only checking that the equal beam case is handled correctly.
    # Fake the beam in the first channel. Then ensure that the first channel
    # has NOT been convolved.
    target_beam = Beam(1.0 * u.arcsec, 1.0 * u.arcsec, 0.0 * u.deg)
    cube.beams[0] = target_beam

    conv_cube = cube.convolve_to(target_beam)

    np.testing.assert_almost_equal(cube.filled_data[0].value,
                                   conv_cube.filled_data[0].value)
Example #23
def test_projection_from_hdu_with_beam(LDO, data):

    p = LDO(data, copy=False)

    hdu = p.hdu

    beam = Beam(1 * u.arcsec)
    hdu.header = beam.attach_to_header(hdu.header)

    p_new = LDO.from_hdu(hdu)

    assert (p == p_new).all()
    assert beam == p_new.meta['beam']
    assert beam == p_new.beam
Example #24
def fixPhangs(image, beam=15.0):
    '''Purpose: fix up Phangs data so we can do analysis for DEGAS.

    4/1/2021: I needed to modify, spectral_cube/base_class.py,
    _get_filled_data function to explicitly use memory mapping. Hack
    courtesy of Eric Koch. Evidently the cube was memory mapped, but
    not the mask.

    return self._mask._filled(data=data, wcs=self._wcs, fill=fill,
                                  view=view, wcs_tolerance=self._wcs_tolerance,
                                  #use_memmap=use_memmap
                                  use_memmap=False # hack to get phangs data to work
                                 )

    '''
    #os.environ['TMPDIR'] = '/lustre/cv/users/akepley/tmp'

    #cube = SpectralCube.read(image)
    cube = SpectralCube.read(image, use_memmap=False, mode='readonly')

    if re.search('ngc2903', image):
        subCube = cube[59:332, :, :]
    else:
        subCube = cube[:, :, :]

    cube_kms = subCube.with_spectral_unit(u.km / u.s)

    smoothFactor = 4.0
    cube_kms.allow_huge_operations = True
    spSmoothCube = cube_kms.spectral_smooth(Box1DKernel(smoothFactor),
                                            use_memmap=False,
                                            verbose=1)

    # Interpolate onto a new axis
    spec_axis = spSmoothCube.spectral_axis
    chan_width = spec_axis[1] - spec_axis[0]  # channels are equally spaced in velocity
    new_axis = np.arange(spec_axis[0].value, spec_axis[-1].value,
                         smoothFactor * chan_width.value) * u.km / u.s

    interpCube = spSmoothCube.spectral_interpolate(
        new_axis, suppress_smooth_warning=True)

    newBeam = Beam(beam * u.arcsec)

    #interpCube.allow_huge_operations=True
    smoothCube = interpCube.convolve_to(newBeam)
    smoothCube.write(image.replace('_7p5as.fits', '_10kms_gauss15.fits'),
                     overwrite=True)
Example #25
def average_beams(beams, includemask=None):
    """
    Average the beam major, minor, and PA attributes.

    This is usually a dumb thing to do!
    """

    from radio_beam import Beam
    from astropy.stats import circmean

    major, minor, pa = beam_props(beams, includemask)
    new_beam = Beam(major=major.mean(), minor=minor.mean(),
                    pa=circmean(pa, weights=major/minor))

    return new_beam
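
As the docstring warns, averaging beam parameters is rarely the right operation; the usual alternative is the smallest beam that contains every beam in the set. A sketch, assuming the common_beam method is available on radio_beam's Beams class:

# Sketch: compute a common beam instead of averaging beam parameters.
from astropy import units as u
from radio_beam import Beams

beams = Beams(major=[1.0, 1.5] * u.arcsec,
              minor=[0.8, 1.2] * u.arcsec,
              pa=[30.0, 60.0] * u.deg)
print(beams.common_beam())  # a beam to which every element can be convolved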
Example #26
def generate_testing_data(return_images=True,
                          powerlawindex=1.5,
                          largest_scale=56. * u.arcsec,
                          smallest_scale=3. * u.arcsec,
                          lowresfwhm=30. * u.arcsec,
                          pixel_scale=1 * u.arcsec,
                          imsize=512,
                          seed=32788324):

    orig_img = make_extended(imsize=imsize, powerlaw=powerlawindex, seed=seed)

    restfreq = (2 * u.mm).to(u.GHz, u.spectral())

    sd_img = singledish_observe_image(orig_img, pixel_scale, Beam(lowresfwhm))

    interf_img = \
        interferometrically_observe_image(orig_img, pixel_scale,
                                          largest_scale,
                                          smallest_scale)[0].real

    # Make these FITS HDUs
    orig_hdr = generate_header(pixel_scale, pixel_scale, imsize, restfreq)
    orig_hdu = fits.PrimaryHDU(orig_img, header=orig_hdr)

    sd_hdr = generate_header(pixel_scale, lowresfwhm, imsize, restfreq)
    sd_hdu = fits.PrimaryHDU(sd_img, header=sd_hdr)

    interf_hdr = generate_header(pixel_scale, smallest_scale, imsize, restfreq)
    interf_hdu = fits.PrimaryHDU(interf_img, header=interf_hdr)

    if return_images:
        return orig_hdu, sd_hdu, interf_hdu

    angscales, ratios, highres_pts, lowres_pts = \
        feather_compare(interf_hdu, sd_hdu, SAS=lowresfwhm, LAS=largest_scale,
                        lowresfwhm=lowresfwhm, return_samples=True,
                        doplot=False)

    # There are a bunch of tiny points that should be empty, but aren't b/c
    # of numerical rounding
    good_pts = ratios > 1e-10
    angscales = angscales[good_pts]
    ratios = ratios[good_pts]
    highres_pts = highres_pts[good_pts]
    lowres_pts = lowres_pts[good_pts]

    return angscales, ratios, lowres_pts, highres_pts
Example #27
def fixExtraHERA(fitsimage, beam=15.0):
    '''
    Fix the extra heracles data.

    TBD: need to add an efficiency correction to these data.
    '''

    f = fits.open(fitsimage)
    f[0].header['CUNIT3'] = 'm/s'
    newimage = fitsimage.replace('.fits', '_fixed.fits')
    f.writeto(newimage, overwrite=True)
    f.close()

    # open image
    cube = SpectralCube.read(newimage)

    cube_kms = cube.with_spectral_unit(u.km / u.s)

    if re.search('ngc3631', newimage):
        # extra channels with data
        subCube = cube_kms[72:379, :, :]
    elif re.search('ngc4030', newimage):
        subCube = cube_kms[78:384, :, :]
    else:
        subCube = cube_kms[:, :, :]

    # smooth
    newBeam = Beam(beam * u.arcsec)
    smoothCube = subCube.convolve_to(newBeam)

    # smooth velocity
    smoothFactor = 4.0
    spSmoothCube = smoothCube.spectral_smooth(Box1DKernel(smoothFactor))

    # Interpolate onto a new axis
    spec_axis = spSmoothCube.spectral_axis
    chan_width = spec_axis[1] - spec_axis[0]  # channels are equally spaced in velocity
    new_axis = np.arange(spec_axis[0].value, spec_axis[-1].value,
                         smoothFactor * chan_width.value) * u.km / u.s

    interpCube = spSmoothCube.spectral_interpolate(
        new_axis, suppress_smooth_warning=True)

    # write out
    interpCube.write(newimage.replace('.fits', '_10kms_gauss15.fits'),
                     overwrite=True)
Example #28
def test_beams_convolution():
    cube, data = cube_and_raw('455_delta_beams.fits')

    # 1" convolved with 1.5" -> 1.8027....
    target_beam = Beam(1.802775637731995*u.arcsec, 1.802775637731995*u.arcsec,
                       0*u.deg)

    conv_cube = cube.convolve_to(target_beam)

    pixscale = wcs.utils.proj_plane_pixel_area(cube.wcs.celestial)**0.5*u.deg

    for ii,bm in enumerate(cube.beams):
        expected = target_beam.deconvolve(bm).as_kernel(pixscale, x_size=5,
                                                        y_size=5)

        np.testing.assert_almost_equal(expected.array,
                                       conv_cube.filled_data[ii,:,:].value)
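
The value in the comment above follows from the fact that convolving two Gaussian beams adds their FWHMs in quadrature: sqrt(1.0**2 + 1.5**2) ~= 1.8028 arcsec. A quick check using radio_beam (a sketch, not part of the test):

import numpy as np
from astropy import units as u
from radio_beam import Beam

conv = Beam(1.5 * u.arcsec).convolve(Beam(1.0 * u.arcsec))
print(conv.major.to(u.arcsec), np.sqrt(1.0**2 + 1.5**2))  # both ~1.8028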
Example #29
def postConvolve(filein,
                 bmaj=None,
                 bmin=None,
                 bpa=0 * u.deg,
                 beamscale=1.1,
                 fileout=None):
    """
    This is a cube convolution wrapper that increases the beam size
    modestly to improve sensitivity.

    filein : str
        Name of the FITS file to convolve

    bmaj : astropy.units.Quantity
        Size of the new beam major axis

    bmin : astropy.units.Quantity
        Size of the new beam minor axis

    bpa : astropy.units.Quantity
        Position angle of the new beam

    beamscale : float
       Increase the beam size by this fraction upon convolution.
       Defaults to 1.1

    fileout : str
       Name of file to write out.  Defaults to appending '_conv' to the
       filename

    """
    if '.fits' not in filein:
        filein += '.fits'
    cube = SpectralCube.read(filein)

    if bmaj is None:
        bmaj = cube.beam.major * beamscale
    if bmin is None:
        bmin = bmaj
    if fileout is None:
        fileout = filein.replace('.fits', '_conv.fits')

    targetBeam = Beam(bmaj, bmin, bpa)
    newcube = cube.convolve_to(targetBeam)
    newcube.write(fileout, overwrite=True)
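
A minimal usage sketch for postConvolve; the filename is a placeholder, not from the original source:

# Hypothetical call: grow the beam by 20% for extra surface-brightness
# sensitivity; writes 'source_cube_conv.fits' by default.
postConvolve('source_cube.fits', beamscale=1.2)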
Example #30
def fixOVRO(fitsimage, beam=15.0):
    '''
    fix up OVRO data.
    '''

    # open image
    cube = SpectralCube.read(fitsimage)

    # convert to K
    kcube = cube.to(u.K)

    # smooth
    newBeam = Beam(beam * u.arcsec)
    smoothCube = kcube.convolve_to(newBeam)

    # write out
    smoothCube.write(fitsimage.replace('.fits', '_gauss15_fixed.fits'),
                     overwrite=True)