Example #1
# Assumed imports for this snippet; gauss_kern and signal_id_flag are set in
# the enclosing module.
import numpy as np
import scipy.ndimage as nd
from astropy.convolution import convolve
from spectral_cube import SpectralCube, LazyMask
from signal_id import Noise


def moment_masking(cube, kernel_size, clip=5, dilations=1):
    '''
    Create a signal mask by moment masking: smooth the cube spatially,
    threshold the smoothed cube at `clip` times its noise level, then
    binarily dilate the result.
    '''

    if not signal_id_flag:
        raise ImportError("signal-id is not installed."
                          " This function is not available.")

    smooth_data = convolve(cube.filled_data[:], gauss_kern(kernel_size))

    fake_mask = LazyMask(np.isfinite, cube=cube)

    smooth_cube = SpectralCube(data=smooth_data, wcs=cube.wcs, mask=fake_mask)

    smooth_scale = Noise(smooth_cube).scale

    mask = (smooth_cube > (clip * smooth_scale)).include()

    # Dilate the mask `dilations` times

    dilate_struct = nd.generate_binary_structure(3, 3)
    mask = nd.binary_dilation(mask, structure=dilate_struct,
                              iterations=dilations)

    return mask
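A hedged usage sketch (the filename and kernel width are hypothetical):

cube = SpectralCube.read("example_cube.fits")  # hypothetical filename
signal_mask = moment_masking(cube, kernel_size=3, clip=5, dilations=1)
masked_cube = cube.with_mask(signal_mask)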
Example #2
    def create_spectral_cube(self, mask=None, wcs=None, header=None):
        if not hasattr(self, 'datacube'):
            raise RuntimeError('First run an aggregator function')
        spectra1d = self.snap_spectra.spectrum
        if header is None:
            _header = get_header()
            wvl = spectra1d.spectral_axis.value
            wvl_diff = np.diff(wvl)
            # Check that the channel spacing is uniform along the axis
            if not np.allclose(wvl_diff[0], wvl_diff,
                               rtol=1e-05,
                               atol=1e-08):
                raise RuntimeError('Spectral axis has varying resolution')
            _header['CD3_3'] = wvl_diff[0]
            _header['CRVAL3'] = wvl[0]
            _header['CRPIX1'] = _header['CRPIX2'] = self.bins / 2
            header = _header

        if wcs is None:
            wcs = WCS(header)

        if mask is None:
            mask = LazyMask(np.isfinite, data=self.datacube, wcs=wcs)

        print('Creating cube...')
        # MySpectralCube is assumed to be a SpectralCube subclass defined in
        # the enclosing module.
        cube = MySpectralCube(data=self.datacube.astype(np.float32) *
                              spectra1d.flux.unit,
                              wcs=wcs,
                              mask=mask,
                              header=header)
        return cube
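A hedged usage sketch; `gen` is a stand-in (an assumption) for the object
exposing this method, after one of its aggregator functions has populated
`gen.datacube` and `gen.snap_spectra`:

cube = gen.create_spectral_cube()  # header, WCS and mask built automatically
cube.write('aggregated_cube.fits', overwrite=True)  # hypothetical output path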
Example #3
# Assumes: from astropy.io.fits import getdata; from astropy.wcs import WCS;
# SpectralCube, LazyMask and Mask_and_Moments (turbustat) imported as well.
def load_and_reduce(filename, add_noise=False, rms_noise=0.001,
                    nsig=3):
    '''
    Load the cube in and derive the property arrays.
    '''

    if add_noise:
        if rms_noise is None:
            raise TypeError("Must specify value of rms noise.")

        cube, hdr = getdata(filename, header=True)

        from scipy.stats import norm
        cube += norm.rvs(0.0, rms_noise, cube.shape)

        sc = SpectralCube(data=cube, wcs=WCS(hdr))

        mask = LazyMask(np.isfinite, sc)
        sc = sc.with_mask(mask)

    else:
        sc = filename

    reduc = Mask_and_Moments(sc, scale=rms_noise)
    reduc.make_mask(mask=reduc.cube > nsig * reduc.scale)
    reduc.make_moments()
    reduc.make_moment_errors()

    return reduc.to_dict()
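A hedged call on a FITS cube, adding Gaussian noise with 0.001 rms (the path
is hypothetical); the dict layout matches Example #7 below, where each entry
holds a (data, header) pair:

props = load_and_reduce("design22.fits", add_noise=True, rms_noise=0.001)
mom0_data, mom0_hdr = props["moment0"]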
Example #4
def create_single_spectra_cube(bins, mask=None, header=None, wcs=None):
    import sys  # used by sys.exit() below
    from simifucube.generate_spectra import STD_MET, STD_AGE, to_spectrum1d
    from simifucube.spectra import Spectrum
    mass = 8000  # Msol
    print('Creating cube from a particle with age={}, met={}, mass={} Msol'.
          format(STD_AGE, STD_MET, mass))

    sp = Spectrum.from_met_age(STD_MET, STD_AGE)
    L_sol = 3.839e33  # erg s-1
    kpc_in_cm = 3.086e+21  # cm
    pos = np.array([0.0, 0.0, 0.0])
    z_dist = 20000  # kpc
    dist_sq = np.linalg.norm(pos - np.array([0, 0, z_dist]))**2
    sp.flux *= mass * L_sol / (4 * np.pi * dist_sq * kpc_in_cm**2)
    sp_list = [sp] * bins**2
    spectra1d = to_spectrum1d(sp_list)

    if header is None:
        _header = get_header()
        wvl = spectra1d.spectral_axis.value
        wvl_diff = np.diff(wvl)
        # Check that the channel spacing is uniform along the axis
        if not np.allclose(wvl_diff[0], wvl_diff,
                           rtol=1e-05,
                           atol=1e-08):
            raise RuntimeError('Spectral axis has varying resolution')
        _header['CD3_3'] = wvl_diff[0]
        _header['CRVAL3'] = wvl[0]
        _header['CRPIX1'] = _header['CRPIX2'] = bins / 2
        header = _header

    if wcs is None:
        wcs = WCS(header)

    data = spectra1d.data.reshape(bins, bins, spectra1d.shape[1]).astype(
        np.float32).transpose(2, 0, 1)

    if mask is None:
        mask = LazyMask(np.isfinite, data=data, wcs=wcs)
    cube = SpectralCube(data=data * spectra1d.flux.unit,
                        wcs=wcs,
                        mask=mask,
                        header=header)
    print("Writing output...")
    output_name = contract_name("ssp_cube_b{}{}.fits".format(
        bins, '_nods' if not doppler_shift else ''))
    write_cube(final_cube, variance_cube, output_name, overwrite_output)
    sys.exit(0)
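A quick unit check of the flux scaling above, using astropy.units with the
same constants (mass, L_sol, distance) as the snippet:

import numpy as np
import astropy.units as u

mass = 8000                      # Msol
L_sol = 3.839e33 * u.erg / u.s   # solar luminosity, as in the snippet
d = 20000 * u.kpc                # z_dist
# Same scale as mass * L_sol / (4 * np.pi * dist_sq * kpc_in_cm**2)
scale = (mass * L_sol / (4 * np.pi * d**2)).to(u.erg / (u.s * u.cm**2))
print(scale)  # roughly 6.4e-16 erg / (s cm2)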
Example #5
def load_and_reduce(filename,
                    add_noise=False,
                    rms_noise=0.001,
                    nsig=3,
                    slicewise_noise=True):
    '''
    Load the cube in and derive the property arrays.
    '''

    if add_noise:
        if rms_noise is None:
            raise TypeError("Must specify value of rms noise.")

        cube, hdr = getdata(filename, header=True)

        # Optionally scale noise by 1/10th of the 98th percentile in the cube
        if rms_noise == 'scaled':
            rms_noise = 0.1 * np.percentile(cube[np.isfinite(cube)], 98)

        from scipy.stats import norm
        if not slicewise_noise:
            cube += norm.rvs(0.0, rms_noise, cube.shape)
        else:
            spec_shape = cube.shape[0]
            slice_shape = cube.shape[1:]
            for i in range(spec_shape):
                cube[i, :, :] += norm.rvs(0.0, rms_noise, slice_shape)

        sc = SpectralCube(data=cube, wcs=WCS(hdr))

        mask = LazyMask(np.isfinite, sc)
        sc = sc.with_mask(mask)

    else:
        sc = filename

    reduc = Mask_and_Moments(sc, scale=rms_noise)
    reduc.make_mask(mask=reduc.cube > nsig * reduc.scale)
    reduc.make_moments()
    reduc.make_moment_errors()

    return reduc.to_dict()
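A hedged call using the 'scaled' option this variant adds: the rms is set to
one tenth of the cube's 98th percentile, and noise is drawn channel by
channel (the path is hypothetical):

props = load_and_reduce("design22.fits", add_noise=True,
                        rms_noise='scaled', slicewise_noise=True)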
Example #6
def create_cube(spectra1d, x, y, bins, mask=None, wcs=None, header=None):
    print('Aggregating spectra in {} bins...'.format(bins))
    if header is None:
        _header = get_header()
        wvl = spectra1d.spectral_axis.value
        wvl_diff = np.diff(wvl)
        # Check that the channel spacing is uniform along the axis
        if not np.allclose(wvl_diff[0], wvl_diff,
                           rtol=1e-05,
                           atol=1e-08):
            raise RuntimeError('Spectral axis has varying resolution')
        _header['CD3_3'] = wvl_diff[0]
        _header['CRVAL3'] = wvl[0]
        _header['CRPIX1'] = _header['CRPIX2'] = bins / 2
        header = _header

    if wcs is None:
        wcs = WCS(header)

    a = binned_statistic_2d(x,
                            y,
                            spectra1d.flux.transpose(1, 0),
                            statistic='sum',
                            bins=bins,
                            expand_binnumbers=True)
    if mask is None:
        mask = LazyMask(np.isfinite, data=a.statistic, wcs=wcs)

    print('Creating cube...')
    cube = SpectralCube(data=a.statistic.astype(np.float32) *
                        spectra1d.flux.unit,
                        wcs=wcs,
                        mask=mask,
                        header=header)

    return a, cube
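A toy check of the binned_statistic_2d aggregation used above, independent of
the rest of the pipeline:

import numpy as np
from scipy.stats import binned_statistic_2d

x = np.array([0.1, 0.2, 0.8, 0.9])
y = np.array([0.1, 0.9, 0.1, 0.9])
flux = np.ones((4, 3))  # 4 particles, 3 wavelength samples
res = binned_statistic_2d(x, y, flux.transpose(1, 0), statistic='sum',
                          bins=2, expand_binnumbers=True)
print(res.statistic.shape)  # (3, 2, 2): a 2x2 map per wavelength channel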
Example #7
# Assumes numpy (np), numpy.random (ra), astropy.io.fits (fits),
# astropy.units (u), WCS, SpectralCube, LazyMask, Moments and Projection
# are imported, and that path1 and header are defined earlier in the script.
dataset1 = np.load(path1)

cube1 = np.empty((500, 32, 32))

count = 0
for posn, kept in zip(*dataset1["channels"]):
    posn = int(posn)
    if kept:
        cube1[posn, :, :] = dataset1["cube"][count, :, :]
        count += 1
    else:
        cube1[posn, :, :] = ra.normal(0.005, 0.005, (32, 32))

sc1 = SpectralCube(data=cube1, wcs=WCS(header))
mask = LazyMask(np.isfinite, sc1)
sc1 = sc1.with_mask(mask)
# Set the scale for the purposes of the tests
props1 = Moments(sc1, scale=0.003031065017916262 * u.Unit(""))
# props1.make_mask(mask=mask)
props1.make_moments()
props1.make_moment_errors()

dataset1 = props1.to_dict()

moment0_hdu1 = fits.PrimaryHDU(dataset1["moment0"][0],
                               header=dataset1["moment0"][1])

moment0_proj = Projection.from_hdu(moment0_hdu1)
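The resulting Projection can be inspected directly; quicklook() is part of
the spectral-cube API:

moment0_proj.quicklook()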

Example #8
##############################################################################
def smooth_cube(
        incube=None,
        outfile=None,
        angular_resolution=None,
        linear_resolution=None,
        distance=None,
        velocity_resolution=None,
        nan_treatment='interpolate', # can also be 'fill'
        tol=None,
        make_coverage_cube=False,
        collapse_coverage=False,
        coveragefile=None,
        coverage2dfile=None,
        dtype=np.float32,
        overwrite=True
    ):
    """
    Smooth an input cube to coarser angular or spectral
    resolution. This lightly wraps spectral cube and some of the error
    checking is left to that.

    tol is a fraction. When the target beam is within tol of the
    original beam, we just copy.

    Optionally, also calculate a coverage footprint in which original
    (finite) cube coverage starts at 1.0 and the output cube shows the
    fraction of finite pixels.
    """

    # &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
    # Error checking
    # &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%

    # Require a valid cube or map input
    if isinstance(incube, SpectralCube):
        cube = incube
    elif isinstance(incube, str):
        cube = SpectralCube.read(incube)
    else:
        logger.error("Input must be a SpectralCube object or a filename.")
        return None

    # Allow huge operations. If the speed or segfaults become a huge
    # problem, we will adjust our strategy here.

    cube.allow_huge_operations = True

    # Check that only one target scale is set
    if (angular_resolution is not None) and (linear_resolution is not None):
        logger.error('Only one of angular_resolution or '
                     'linear_resolution can be set')
        return None

    # Work out the target angular resolution
    if angular_resolution is not None:
        if isinstance(angular_resolution, str):
            angular_resolution = u.Quantity(angular_resolution)

    if linear_resolution is not None:
        if distance is None:
            logger.error('Convolution to linear resolution requires a distance.')
            return None

        if isinstance(distance, str):
            distance = u.Quantity(distance)
        if isinstance(linear_resolution, str):
            linear_resolution = u.Quantity(linear_resolution)
        angular_resolution = (linear_resolution / distance * u.rad).to(u.arcsec)
        dist_mpc_val = float(distance.to(u.pc).value) / 1e6
        cube._header.append(('DIST_MPC', dist_mpc_val, 'Used in convolution'))

    if tol is None:
        tol = 0.0

    # &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
    # Convolution to coarser beam
    # &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
    
    if angular_resolution is not None:
        logger.info("... convolving from beam: "+str(cube.beam))
        target_beam = Beam(major=angular_resolution,
                           minor=angular_resolution,
                           pa=0 * u.deg)
        logger.info("... convolving to beam: "+str(target_beam))

        new_major = float(target_beam.major.to(u.arcsec).value)
        old_major = float(cube.beam.major.to(u.arcsec).value)
        delta = (new_major - old_major) / old_major

        logger.info("... fractional change: " + str(delta))
        
        if make_coverage_cube:
            coverage = SpectralCube(np.isfinite(cube.unmasked_data[:])*1.0,
                                    wcs=cube.wcs,
                                    header=cube.header,
                                    meta={'BUNIT': ' ', 'BTYPE': 'Coverage'})
            coverage = coverage.with_mask(LazyMask(np.isfinite, cube=coverage))
            
            # Allow huge operations. If the speed or segfaults become a huge
            # problem, we will adjust our strategy here.

            coverage.allow_huge_operations = True

        if delta > tol:
            logger.info("... proceeding with convolution.")
            cube = cube.convolve_to(target_beam,
                                    nan_treatment=nan_treatment)
            if make_coverage_cube:
                coverage = coverage.convolve_to(target_beam,
                                                nan_treatment=nan_treatment)

        if np.abs(delta) < tol:
            logger.info("... current resolution meets tolerance.")

        if delta < -1.0 * tol:
            logger.info("... resolution cannot be matched. Returning.")
            return None

    # &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
    # Spectral convolution
    # &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%

    # This is only a boxcar smooth right now and does not downsample
    # or update the header.

    if velocity_resolution is not None:
        if isinstance(velocity_resolution, str):
            velocity_resolution = u.Quantity(velocity_resolution)

        dv = scdr.channel_width(cube)
        nChan = (velocity_resolution / dv).to(u.dimensionless_unscaled).value
        if nChan > 1:
            cube = cube.spectral_smooth(Box1DKernel(nChan))
            if make_coverage_cube:
                coverage = coverage.spectral_smooth(Box1DKernel(nChan))

    # &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
    # Write or return as requested
    # &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%

    if outfile is not None:
        # cube.write(outfile, overwrite=overwrite)
        hdu = fits.PrimaryHDU(np.array(cube.filled_data[:], dtype=dtype),
                              header=cube.header)
        hdu.writeto(outfile, overwrite=overwrite)
        if make_coverage_cube:
            if coveragefile is not None:
                hdu = fits.PrimaryHDU(np.array(coverage.filled_data[:], dtype=dtype),
                                      header=coverage.header)
                hdu.writeto(coveragefile, overwrite=overwrite)
            if collapse_coverage:
                if coveragefile and not coverage2dfile:
                    coverage2dfile = coveragefile.replace('.fits', '2d.fits')
                coverage_collapser(coverage,
                                   coverage2dfile=coverage2dfile,
                                   overwrite=overwrite)
                # coverage.write(coveragefile, overwrite=overwrite)

    return cube
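A hedged example call, smoothing to a fixed linear beam plus a coarser
velocity resolution (all filenames and values are hypothetical):

smoothed = smooth_cube(incube='input_cube.fits',
                       outfile='input_cube_500pc.fits',
                       linear_resolution='500 pc',
                       distance='9.77 Mpc',
                       velocity_resolution='10 km / s',
                       make_coverage_cube=True,
                       coveragefile='input_cube_coverage.fits')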
Example #9
def reduce_and_save(filename,
                    add_noise=False,
                    regrid_linewidth=False,
                    rms_noise=0.001 * u.K,
                    output_path="",
                    cube_output=None,
                    nsig=3,
                    slicewise_noise=True):
    '''
    Load the cube in and derive the property arrays.
    '''

    if add_noise or regrid_linewidth:

        sc = SpectralCube.read(filename)

        if add_noise:
            if rms_noise is None:
                raise TypeError("Must specify value of rms noise.")

            cube = sc.filled_data[:].value

            # Optionally scale noise to 1/10th of the 98th percentile in the
            # cube (pass the string 'scaled' rather than a Quantity)
            if rms_noise == 'scaled':
                rms_noise = 0.1 * \
                    np.percentile(cube[np.isfinite(cube)], 98) * sc.unit

            from scipy.stats import norm
            if not slicewise_noise:
                cube += norm.rvs(0.0, rms_noise.value, cube.shape)
            else:
                spec_shape = cube.shape[0]
                slice_shape = cube.shape[1:]
                for i in range(spec_shape):
                    cube[i, :, :] += norm.rvs(0.0, rms_noise.value,
                                              slice_shape)

            sc = SpectralCube(data=cube * sc.unit,
                              wcs=sc.wcs,
                              meta={"BUNIT": "K"})

            mask = LazyMask(np.isfinite, sc)
            sc = sc.with_mask(mask)

        if regrid_linewidth:
            # Normalize the cubes to have the same linewidth
            # channels_per_sigma=20 scales to the largest mean line width in
            # SimSuite8 (~800 km/s; Design 22). So effectively everything is
            # "smoothed" to have this line width
            # Intensities are normalized by their 95% value.
            sc = preprocessor(sc,
                              min_intensity=nsig * rms_noise,
                              norm_intensity=True,
                              norm_percentile=95,
                              channels_per_sigma=20)

    else:
        sc = filename

    # Run the same signal masking procedure that was used for the COMPLETE
    # cubes (make_signal_mask, like preprocessor above, is assumed to be
    # defined in the enclosing module)
    if add_noise:
        # The default settings were set based on these cubes
        sc = make_signal_mask(sc)[0]

    reduc = Mask_and_Moments(sc, scale=rms_noise)
    if not add_noise:
        reduc.make_mask(mask=reduc.cube > nsig * reduc.scale)

    reduc.make_moments()
    reduc.make_moment_errors()

    # Remove .fits from filename
    save_name = os.path.splitext(os.path.basename(filename))[0]

    reduc.to_fits(os.path.join(output_path, save_name))

    # Save the noisy cube too
    if add_noise or regrid_linewidth:
        save_name += ".fits"
        if cube_output is None:
            sc.hdu.writeto(os.path.join(output_path, save_name))
        else:
            sc.hdu.writeto(os.path.join(cube_output, save_name))
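A hedged call mirroring the options above (paths hypothetical):

import astropy.units as u

reduce_and_save("Design22.fits",
                add_noise=True,
                rms_noise=0.001 * u.K,
                output_path="reduced/",
                nsig=3)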
Example #10
from create_noisy_cube import get_header

snap_name = '/home/michele/sim/MySimulations/np_glass/mb.62002_p200_a800_r600/out/snapshot_0048'
size_cuboid = 1
sp, (x, y) = spectra_from_snap(snap_name, size_cuboid=size_cuboid, doppler_shift=True)

idx = 1
# plt.step(sp.spectral_axis, sp[idx].flux)


bins = 2
a = binned_statistic_2d(x, y, sp.flux.transpose(1, 0), statistic='sum',
                        bins=bins, expand_binnumbers=True)

_wcs = WCS(get_header())

mask = LazyMask(lambda x: x > 0, data=a.statistic, wcs=_wcs)
cube = SpectralCube(data=a.statistic * u.Unit("erg / (Angstrom cm2 s)"),
                    wcs=_wcs, mask=mask)
cube[:, 0, 0].quicklook()

# Select the particles that fell in bin (row, col) = (0, 0)
row, col = 0, 0
selected_sp = sp[np.where(a.binnumber[row] == col + 1)]

# Sum the spectra in that bin:
sp_row_col = selected_sp.flux.sum(axis=0)
fig, ax = plt.subplots()
ax.plot(selected_sp.spectral_axis, sp_row_col)

# Compare with the cube spaxel (mind the relative tolerance):
# np.allclose(sp_row_col.value, cube[:, 0, 0].value)
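A hedged, explicit version of that check (the rtol is a guess; binning order
and float roundoff limit exact equality):

print(np.allclose(sp_row_col.value, cube[:, row, col].value, rtol=1e-5))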