Example #1
import numpy as np

from astropy.io.fits import getdata
from astropy.wcs import WCS
from spectral_cube import SpectralCube, LazyMask

# Mask_and_Moments lived in turbustat.data_reduction in the pre-1.0
# TurbuStat releases this snippet appears to target.
from turbustat.data_reduction import Mask_and_Moments


def load_and_reduce(filename, add_noise=False, rms_noise=0.001,
                    nsig=3):
    '''
    Load the cube in and derive the property arrays.
    '''

    if add_noise:
        if rms_noise is None:
            raise TypeError("Must specify value of rms noise.")

        cube, hdr = getdata(filename, header=True)

        from scipy.stats import norm
        cube += norm.rvs(0.0, rms_noise, cube.shape)

        sc = SpectralCube(data=cube, wcs=WCS(hdr))

        mask = LazyMask(np.isfinite, sc)
        sc = sc.with_mask(mask)

    else:
        sc = filename

    reduc = Mask_and_Moments(sc, scale=rms_noise)
    reduc.make_mask(mask=reduc.cube > nsig * reduc.scale)
    reduc.make_moments()
    reduc.make_moment_errors()

    return reduc.to_dict()
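
A minimal usage sketch; the file name and noise level below are hypothetical, not taken from the original script:

# Hypothetical call: add Gaussian noise with a 0.005 RMS (cube units)
# before masking and computing the moments.
props = load_and_reduce("sim_cube.fits", add_noise=True,
                        rms_noise=0.005, nsig=3)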
Example #2
import numpy as np

from astropy.io.fits import getdata
from astropy.wcs import WCS
from spectral_cube import SpectralCube, LazyMask
from turbustat.data_reduction import Mask_and_Moments


def load_and_reduce(filename,
                    add_noise=False,
                    rms_noise=0.001,
                    nsig=3,
                    slicewise_noise=True):
    '''
    Load the cube in and derive the property arrays.
    '''

    if add_noise:
        if rms_noise is None:
            raise TypeError("Must specify value of rms noise.")

        cube, hdr = getdata(filename, header=True)

        # Optionally scale noise by 1/10th of the 98th percentile in the cube
        if rms_noise == 'scaled':
            rms_noise = 0.1 * np.percentile(cube[np.isfinite(cube)], 98)

        from scipy.stats import norm
        if not slicewise_noise:
            cube += norm.rvs(0.0, rms_noise, cube.shape)
        else:
            spec_shape = cube.shape[0]
            slice_shape = cube.shape[1:]
            for i in range(spec_shape):
                cube[i, :, :] += norm.rvs(0.0, rms_noise, slice_shape)

        sc = SpectralCube(data=cube, wcs=WCS(hdr))

        mask = LazyMask(np.isfinite, sc)
        sc = sc.with_mask(mask)

    else:
        sc = filename

    reduc = Mask_and_Moments(sc, scale=rms_noise)
    reduc.make_mask(mask=reduc.cube > nsig * reduc.scale)
    reduc.make_moments()
    reduc.make_moment_errors()

    return reduc.to_dict()
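
This variant adds two knobs over Example #1; a hedged sketch of both (file name hypothetical):

# rms_noise='scaled' derives the noise level from the cube itself
# (1/10th of its 98th percentile), while slicewise_noise=True draws an
# independent noise realization for each spectral channel.
props = load_and_reduce("sim_cube.fits", add_noise=True,
                        rms_noise='scaled', slicewise_noise=True)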
Example #3
    for i in ProgressBar(mask.shape[0]):
        mask[i] = nd.binary_opening(mask[i], kernel)
        mask[i] = nd.binary_closing(mask[i], kernel)
        mask[i] = mo.remove_small_objects(mask[i],
                                          min_size=kernel_pix,
                                          connectivity=2)
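        # Note: scikit-image >= 0.16 renames remove_small_holes'
        # `min_size` argument to `area_threshold`.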
        mask[i] = mo.remove_small_holes(mask[i],
                                        min_size=kernel_pix,
                                        connectivity=2)

    # Each region must contain a point above the peak_snr
    labels, num = nd.label(mask, np.ones((3, 3, 3)))
    for n in range(1, num + 1):
        pts = np.where(labels == n)
        if np.nanmax(snr[pts]) < peak_snr:
            mask[pts] = False

    masked_cube = cube.with_mask(mask)

    # Save the masked cube
    masked_cube.write("{}.masked.fits".format(name))

    # Now make the moment arrays and their errors
    reduc = Mask_and_Moments(masked_cube, scale=noise.scale)

    reduc.make_moments()
    reduc.make_moment_errors()

    reduc.to_fits(os.path.join("moments/", name))
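
The excerpt above leans on names defined earlier in the original script. A purely illustrative setup for those names, under assumed values that may differ from the original:

# Illustrative only: plausible definitions for the names the excerpt
# assumes (kernel, kernel_pix, peak_snr, snr, mask).
import numpy as np
import scipy.ndimage as nd
import skimage.morphology as mo
from astropy.utils.console import ProgressBar

kernel = mo.disk(3)             # per-channel structuring element
kernel_pix = int(kernel.sum())  # small-object/hole threshold in pixels
peak_snr = 5.0                  # required peak S/N within each region
snr = cube.filled_data[:].value / 0.001  # S/N for an assumed 1 mK RMS
mask = snr > 3                  # initial low-threshold signal mask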
Example #4
                 append_prefix=True, design_labels=[], verbose=False)

# Now the AMR cubes
fiducials_amr, _, _ = \
    files_sorter(path_to_amrdata, append_prefix=True, design_labels=[],
                 faces=faces, timesteps='last', verbose=False)

# If the AMR moments path doesn't exist, make the moment arrays and save.
if not os.path.exists(amrmoments_path):

    os.mkdir(amrmoments_path)

    for face in faces:
        for fid in fiducials_amr[face]:
            fid_name = fiducials_amr[face][fid]
            mask_mom = Mask_and_Moments(fid_name, scale=0.001 * u.K)
            mask_mom.make_moments()
            mask_mom.make_moment_errors()

            save_name = os.path.splitext(os.path.basename(fid_name))[0]
            mask_mom.to_fits(os.path.join(amrmoments_path, save_name))

# Now run the distances AMR vs. none.
statistics = copy(statistics_list)
statistics.append("DeltaVariance_Centroid_Curve")
statistics.append("DeltaVariance_Centroid_Slope")

print "Statistics to run: %s" % (statistics)
num_statistics = len(statistics)

for face in faces:
Example #5
import os

import astropy.units as u
import numpy as np
from spectral_cube import SpectralCube, LazyMask
from turbustat.data_reduction import Mask_and_Moments

# `preprocessor` and `make_signal_mask` are project-local helpers defined
# elsewhere in the original scripts.


def reduce_and_save(filename,
                    add_noise=False,
                    regrid_linewidth=False,
                    rms_noise=0.001 * u.K,
                    output_path="",
                    cube_output=None,
                    nsig=3,
                    slicewise_noise=True):
    '''
    Load the cube in and derive the property arrays.
    '''

    if add_noise or regrid_linewidth:

        sc = SpectralCube.read(filename)

        if add_noise:
            if rms_noise is None:
                raise TypeError("Must specify value of rms noise.")

            cube = sc.filled_data[:].value

            # Optionally scale noise by 1/10th of the 98th percentile in the
            # cube
            if rms_noise == 'scaled':
                rms_noise = 0.1 * \
                    np.percentile(cube[np.isfinite(cube)], 98) * sc.unit

            from scipy.stats import norm
            if not slicewise_noise:
                cube += norm.rvs(0.0, rms_noise.value, cube.shape)
            else:
                spec_shape = cube.shape[0]
                slice_shape = cube.shape[1:]
                for i in range(spec_shape):
                    cube[i, :, :] += norm.rvs(0.0, rms_noise.value,
                                              slice_shape)

            sc = SpectralCube(data=cube * sc.unit,
                              wcs=sc.wcs,
                              meta={"BUNIT": "K"})

            mask = LazyMask(np.isfinite, sc)
            sc = sc.with_mask(mask)

        if regrid_linewidth:
            # Normalize the cubes to have the same linewidth.
            # channels_per_sigma=20 scales to the largest mean line width in
            # SimSuite8 (~800 km/s; Design 22), so effectively everything is
            # "smoothed" to that line width.
            # Intensities are normalized by their 95th-percentile value.
            sc = preprocessor(sc,
                              min_intensity=nsig * rms_noise,
                              norm_intensity=True,
                              norm_percentile=95,
                              channels_per_sigma=20)

    else:
        sc = filename

    # Run the same signal masking procedure that was used for the
    # COMPLETE cubes
    if add_noise:
        # The default settings were set based on these cubes
        sc = make_signal_mask(sc)[0]

    reduc = Mask_and_Moments(sc, scale=rms_noise)
    if not add_noise:
        reduc.make_mask(mask=reduc.cube > nsig * reduc.scale)

    reduc.make_moments()
    reduc.make_moment_errors()

    # Remove .fits from filename
    save_name = os.path.splitext(os.path.basename(filename))[0]

    reduc.to_fits(os.path.join(output_path, save_name))

    # Save the modified (noisy and/or regridded) cube too
    if add_noise or regrid_linewidth:
        save_name += ".fits"
        if cube_output is None:
            sc.hdu.writeto(os.path.join(output_path, save_name))
        else:
            sc.hdu.writeto(os.path.join(cube_output, save_name))
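
A hedged usage sketch; the path and noise value are hypothetical, and `preprocessor` / `make_signal_mask` are the original project's helpers, not shown here:

# Hypothetical call: add 1 mK slicewise noise, write the moment arrays
# into "moments/", and save the noisy cube next to them.
reduce_and_save("sim_cube.fits",
                add_noise=True,
                rms_noise=0.001 * u.K,
                output_path="moments/",
                cube_output=None)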
Example #6
import os
import sys

from spectral_cube import SpectralCube
from turbustat.data_reduction import Mask_and_Moments

fits1 = str(sys.argv[1])
fits2 = str(sys.argv[2])

# scale = float(sys.argv[3])

cube1 = SpectralCube.read(fits1)
cube2 = SpectralCube.read(fits2)

# Shorten the name for the plots
fits1 = os.path.basename(fits1)
fits2 = os.path.basename(fits2)

# Naive error estimation. Useful only for quickly testing the methods.
scale1 = cube1.std().value
scale2 = cube2.std().value

set1 = Mask_and_Moments(cube1, scale=scale1)
# mask = cube1 > sigma * set1.scale
# set1.make_mask(mask=mask)
set1.make_moments()
set1.make_moment_errors()
dataset1 = set1.to_dict()

set2 = Mask_and_Moments(cube2, scale=scale2)
# mask = cube2 > sigma * set2.scale
# set2.make_mask(mask=mask)
set2.make_moments()
set2.make_moment_errors()
dataset2 = set2.to_dict()

# Wavelet Transform
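
The excerpt ends where the wavelet comparison would start. A hedged sketch of how it likely continued, assuming TurbuStat's Wavelet_Distance class and assuming to_dict() exposes a "moment0" entry (neither is confirmed by this excerpt):

# Assumption: Wavelet_Distance follows TurbuStat's common *_Distance
# pattern (distance_metric() populates a .distance attribute), and
# "moment0" is a guess at the dictionary key for the moment-0 map.
from turbustat.statistics import Wavelet_Distance

wavelet_dist = Wavelet_Distance(dataset1["moment0"],
                                dataset2["moment0"]).distance_metric()
print("Wavelet distance: %s" % wavelet_dist.distance)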