Example #1
    def compute_transform(self, show_progress=True, scale_normalization=True,
                          use_pyfftw=False, threads=1, pyfftw_kwargs={}):
        '''
        Compute the wavelet transform at each scale.

        Parameters
        ----------
        show_progress : bool, optional
            Show a progress bar while the transform is computed at each
            scale.
        scale_normalization : bool, optional
            Compute the transform with the correct scale-invariant
            normalization.
        use_pyfftw : bool, optional
            Use pyfftw for the FFTs, if it is installed.
        threads : int, optional
            Number of threads to use in the FFT when using pyfftw.
        pyfftw_kwargs : dict, optional
            Passed to `pyfftw.builders`. See `here <http://hgomersall.github.io/pyFFTW/pyfftw/builders/builders.html>`_
            for a list of accepted kwargs.
        '''

        if use_pyfftw:
            if PYFFTW_FLAG:
                use_fftn = fftn
                use_ifftn = ifftn
            else:
                warn("pyfftw not installed. Using numpy.fft functions.")
                use_fftn = np.fft.fftn
                use_ifftn = np.fft.ifftn
        else:
            use_fftn = np.fft.fftn
            use_ifftn = np.fft.ifftn

        n0, m0 = self.data.shape
        A = len(self.scales)

        self._Wf = np.zeros((A, n0, m0), dtype=float)

        factor = 2
        if not scale_normalization:
            factor = 4
            Warning("Transform values are only reliable with the proper scale"
                    " normalization. When disabled, the slope of the transform"
                    " CANNOT be used for physical interpretation.")

        pix_scales = self._to_pixel(self.scales).value

        if show_progress:
            bar = ProgressBar(len(pix_scales))

        for i, an in enumerate(pix_scales):
            psi = MexicanHat2DKernel(an)

            self._Wf[i] = \
                convolve_fft(self.data, psi, normalize_kernel=False,
                             fftn=use_fftn, ifftn=use_ifftn).real * \
                an**factor

            if show_progress:
                bar.update(i + 1)
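
This method appears to come from a TurbuStat-style `Wavelet` class. A minimal usage sketch, assuming the class accepts a FITS HDU and provides the `data`, `scales`, and `_to_pixel` attributes used above (the import path and file name are assumptions, not taken from the snippet):

from astropy.io import fits
from turbustat.statistics import Wavelet  # assumed import path

hdu = fits.open("map.fits")[0]  # hypothetical input image
wav = Wavelet(hdu)
wav.compute_transform(show_progress=False)
# wav._Wf now holds the transform cube with shape (n_scales, ny, nx)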
Example #2
    def compute_transform(self, scale_normalization=True):
        '''
        Compute the wavelet transform at each scale.

        Parameters
        ----------
        scale_normalization : bool, optional
            Compute the transform with the correct scale-invariant
            normalization.

        '''

        n0, m0 = self.data.shape
        A = len(self.scales)

        self._Wf = np.zeros((A, n0, m0), dtype=float)

        factor = 2
        if not scale_normalization:
            factor = 4
            Warning("Transform values are only reliable with the proper scale"
                    " normalization. When disabled, the slope of the transform"
                    " CANNOT be used for physical interpretation.")

        pix_scales = self._to_pixel(self.scales).value

        for i, an in enumerate(pix_scales):
            psi = MexicanHat2DKernel(an)

            self._Wf[i] = \
                convolve_fft(self.data, psi, normalize_kernel=False).real * \
                an**factor
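
For reference, the `factor` switch above encodes the normalization convention: the convolved map is scaled by a**2 under the scale-invariant normalization and by a**4 without it, so the two conventions differ by a factor of a**2 and the slope of the transform in log-log space shifts by exactly 2:

W(a) = a^{2}\,(f * \psi_a) \quad \text{(scale-invariant)}, \qquad W(a) = a^{4}\,(f * \psi_a) \quad \text{(unnormalized)}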
Example #3
def filter_direct(wavelength_range, image):

    # Make the Mexican hat kernel and convolve
    # The values given to the kernels should be 1.7328, 2.3963, 3.5270
    # for S, M and L.
    mex = MexicanHat2DKernel(get_pixFWHM(wavelength_range))
    # The Mexican hat sums to zero, so kernel normalization must stay off
    mex_convol = convolve(image, mex, boundary='extend',
                          normalize_kernel=False, nan_treatment='fill')

    return mex_convol
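
A hypothetical call of this helper, assuming `get_pixFWHM` maps a band label ("S", "M", or "L") to the pixel widths listed in the comment and that `convolve` and `MexicanHat2DKernel` come from `astropy.convolution` (the file name is illustrative):

from astropy.io import fits

image = fits.getdata("spire_map.fits")  # hypothetical input map
filtered = filter_direct("S", image)    # "S" would select the 1.7328 px width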
Example #4
    def _detect_specimens(self,
                          reconstructed_wave,
                          propagation_distance,
                          margin=100,
                          kernel_radius=4.0,
                          save_png_to_disk=None):
        cropped_img = reconstructed_wave.phase[margin:-margin, margin:-margin]
        # The Mexican hat kernel sums to zero, so skip normalization
        best_convolved_phase = convolve_fft(cropped_img,
                                            MexicanHat2DKernel(kernel_radius),
                                            normalize_kernel=False)

        best_convolved_phase_copy = best_convolved_phase.copy(order='C')

        # Find positive peaks
        blob_doh_kwargs = dict(threshold=0.00007, min_sigma=2, max_sigma=10)
        blobs = blob_doh(best_convolved_phase_copy, **blob_doh_kwargs)

        # Find negative peaks
        negative_phase = -best_convolved_phase_copy
        negative_phase += (np.median(best_convolved_phase_copy) -
                           np.median(negative_phase))
        negative_blobs = blob_doh(negative_phase, **blob_doh_kwargs)

        all_blobs = []
        for blob in blobs:
            if blob.size > 0:
                all_blobs.append(blob)

        for neg_blob in negative_blobs:
            if neg_blob.size > 0:
                all_blobs.append(neg_blob)

        if len(all_blobs) > 0:
            all_blobs = np.vstack(all_blobs)

        # If save pngs:
        if save_png_to_disk is not None:
            path = "{0}/{1:.4f}.png".format(save_png_to_disk,
                                            propagation_distance)
            save_scaled_image(reconstructed_wave.phase, path, margin,
                              all_blobs)

        # Blobs get returned in rows of [y, x, sigma], so save each
        # set of blobs with the propagation distance to record z

        # correct blob positions for margin:
        all_blobs = np.float64(all_blobs)
        if len(all_blobs) > 0:
            all_blobs[:, 0] += margin
            all_blobs[:, 1] += margin
            all_blobs[:, 2] = propagation_distance
            return all_blobs
        else:
            return None
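
For context, a standalone sketch of the `blob_doh` row format relied on above; the synthetic image and threshold are illustrative, not tuned values:

import numpy as np
from skimage.feature import blob_doh

yy, xx = np.mgrid[:64, :64]
# A single Gaussian spot stands in for a specimen peak in the phase image
img = np.exp(-((yy - 32.0) ** 2 + (xx - 20.0) ** 2) / (2 * 3.0 ** 2))

blobs = blob_doh(img, min_sigma=2, max_sigma=10, threshold=0.001)
print(blobs)  # each row is (y, x, sigma)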
Example #5
def main():
    wavelength_range = sys.argv[1]
    print(wavelength_range)
    above, fits_data = library.get_data(wavelength_range)
    image = above * fits_data

    # Create mask without NaN
    m_array = library.create_marray(fits_data)

    # Make the Mexican hat kernel and convolve
    # The values given to the kernels should be 1.7328, 2.3963, 3.5270
    # for S, M and L.
    mex = MexicanHat2DKernel(1.7328)
    # Zero-sum kernel: normalization must stay off in recent astropy
    mex_convol = convolve(image, mex, boundary='extend',
                          normalize_kernel=False, nan_treatment='fill')
    m_mex = ma.masked_array(mex_convol, ~m_array.mask)
    #c_mex = np.multiply(w, m_array.mask)

    # Make the gaussian kernel and convolve
    gauss = Gaussian2DKernel(1.7328)  # stddev in pixels
    gauss_convol = convolve(image, gauss, boundary='extend')
    m_gauss = ma.masked_array(gauss_convol, ~m_array.mask)
    #c_gauss = np.multiply(z, m_array.mask)

    # Plot the figures; the min and the max values are reset in some of them
    # for visualization purposes.
    pixels = None
    fig, main_axes = plt.subplots(1,
                                  3,
                                  sharex=True,
                                  sharey=True,
                                  figsize=(15, 5))
    #main_axes[0][0].imshow(mex, origin="lower", interpolation="None")
    #main_axes[0][0].set_title("MexHat kernel")


    main_axes[0].imshow(m_mex, origin="lower", interpolation="none")
    main_axes[0].set_title("Convolution with MexHat")

    main_axes[1].imshow(m_gauss, origin="lower", interpolation="none")
    main_axes[1].set_title("Convolution with gaussian")

    main_axes[2].imshow(image, origin="lower", interpolation="none")
    main_axes[2].set_title("Data")

    plt.show()

    mean_data = (np.nanmax(fits_data) - np.nanmin(fits_data)) / 2  # half the data range
    print("Mean", mean_data, "\nMax", np.nanmax(fits_data),
          "\nMin", np.nanmin(fits_data))

    return 0
Example #6
def test_deprecated_hat():

    # 'MexicanHat' was deprecated as a name for the kernels which are now
    # 'RickerWavelet'. This test ensures that the kernels are correctly
    # deprecated, and can be imported from the top-level package.

    from astropy.convolution import MexicanHat1DKernel, MexicanHat2DKernel

    with pytest.warns(AstropyDeprecationWarning):
        MexicanHat1DKernel(2)

    with pytest.warns(AstropyDeprecationWarning):
        MexicanHat2DKernel(2)
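
In current astropy (4.0 and later) the same kernels are spelled with the `RickerWavelet` names; the parameters are unchanged:

from astropy.convolution import RickerWavelet1DKernel, RickerWavelet2DKernel

RickerWavelet1DKernel(2)  # drop-in replacement for MexicanHat1DKernel(2)
RickerWavelet2DKernel(2)  # drop-in replacement for MexicanHat2DKernel(2)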
Example #7
    def __init__(self, n_scale, min_scale, step_scale, old=False):
        self.n_scale = n_scale
        self.min_scale = min_scale
        self.step_scale = step_scale
        self.scales = np.array([min_scale * step_scale ** i
                                for i in range(n_scale)], dtype=float)

        self.kern_base = dict()
        for idx_scale, scale in enumerate(self.scales):
            if old:
                self.kern_base[idx_scale] = difference_of_gauss_kernel(scale, step_scale)
            else:
                self.kern_base[idx_scale] = MexicanHat2DKernel(scale * step_scale).array

        max_scale = min_scale * step_scale ** n_scale
        self.kern_approx = Gaussian2DKernel(max_scale).array
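
The scales built above form a geometric ladder, and the Gaussian approximation kernel sits one step beyond the largest wavelet scale:

a_i = a_{\min}\, s^{i}, \quad i = 0, \dots, n-1, \qquad a_{\max} = a_{\min}\, s^{n}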
Example #8
def filter(wavelength_range, filepath):
    # print(wavelength_range)
    above, fits_data = get_data_path(wavelength_range, filepath)
    image = above * fits_data

    # Create mask without NaN
    m_array = create_marray(fits_data)

    # Make the Mexican hat kernel and convolve
    # The values given to the kernels should be 1.7328, 2.3963, 3.5270
    # for S, M and L.
    mex = MexicanHat2DKernel(get_pixFWHM(wavelength_range))
    # Zero-sum kernel: normalization must stay off in recent astropy
    mex_convol = convolve(image, mex, boundary='extend',
                          normalize_kernel=False, nan_treatment='fill')
    m_mex = ma.masked_array(mex_convol, ~m_array.mask)

    # Make the gaussian kernel and convolve
    gauss = Gaussian2DKernel(get_pixFWHM(wavelength_range))  # stddev in pixels
    gauss_convol = convolve(image, gauss, boundary='extend')
    m_gauss = ma.masked_array(gauss_convol, ~m_array.mask)
    #c_gauss = np.multiply(z, m_array.mask)
    return m_mex
Example #9
def test_nikamap_match_filter(nms):

    nm = nms
    mf_nm = nm.match_filter(nm.beam)

    x_idx = np.floor(nm.x + 0.5).astype(int)
    y_idx = np.floor(nm.y + 0.5).astype(int)

    npt.assert_allclose(mf_nm.data[y_idx, x_idx],
                        nm.data[y_idx, x_idx],
                        atol=1e-2,
                        rtol=1e-1)
    npt.assert_allclose((nm.beam.fwhm * np.sqrt(2)).to(u.arcsec),
                        mf_nm.beam.fwhm.to(u.arcsec))

    mh_nm = nm.match_filter(
        MexicanHat2DKernel(nm.beam.fwhm_pix.value * gaussian_fwhm_to_sigma))
    npt.assert_allclose(mh_nm.data[y_idx, x_idx],
                        nm.data[y_idx, x_idx],
                        atol=1e-2,
                        rtol=1e-1)
    assert mh_nm.beam.fwhm is None
Example #10
    def compute_transform(self):
        '''
        Compute the wavelet transform at each scale.
        '''

        n0, m0 = self.data.shape
        A = len(self.scales)

        self.Wf = np.zeros((A, n0, m0), dtype=float)

        factor = 2
        if not self.scale_normalization:
            factor = 4
            warn("Transform values are only reliable with the proper scale"
                 " normalization. When disabled, the slope of the transform"
                 " CANNOT be used for physical interpretation.")

        for i, an in enumerate(self.scales.value):
            psi = MexicanHat2DKernel(an)

            # Zero-sum kernel: normalization must stay off
            self.Wf[i] = \
                convolve_fft(self.data, psi,
                             normalize_kernel=False).real * an**factor
Example #11
import numpy as np
import pywt
import cv2
from astropy.stats import sigma_clip
from astropy.convolution import convolve, AiryDisk2DKernel, Box2DKernel, Gaussian2DKernel, MexicanHat2DKernel
from astropy.convolution import Ring2DKernel, Tophat2DKernel, TrapezoidDisk2DKernel
import myroutines as myr
from util import standard


kernels = [AiryDisk2DKernel(3), Box2DKernel(5), Gaussian2DKernel(2), Gaussian2DKernel(4),
           MexicanHat2DKernel(2), MexicanHat2DKernel(4), Tophat2DKernel(2),
           TrapezoidDisk2DKernel(2), Ring2DKernel(7, 2)]
kernel_names = ['AiryDisk3', 'Box5', 'Gaussian2', 'Gaussian4',
                'MexicanHat2', 'MexicanHat4', 'Tophat2', 'TrapezoidDisk2', 'Ring']

#wts = ['db38','sym20','coif17','bior2.8','bior3.9',\
#'bior4.4','bior5.5','bior6.8','dmey','rbio1.5',\
#'rbio2.8','rbio6.8']
wts = ['db38','sym20','coif17','dmey']
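
# Illustrative sweep of the kernel bank over a 2D array; the random image is
# a stand-in for real data and is not part of the original module. The
# Mexican hat kernels sum to zero, so normalization must be disabled.
_demo_img = np.random.default_rng(0).normal(size=(64, 64))
_demo_smoothed = {name: convolve(_demo_img, kern,
                                 normalize_kernel=False, nan_treatment='fill')
                  for name, kern in zip(kernel_names, kernels)}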

def wavelet(data, wlf, threshold):
	"""
	wavelet: this function .
	
	Arguments:
		data (numpy array): input data.
		wlf: wavelet fucntion.
		threshold: threshold of high pass filter.
		
	--------
	Returns:
Example #12
def main():

    # Variables for the filename
    path = "/home/abeelen/Herschel/DDT_mustdo_5/PLCK_SZ_G004.5-19.6-1/"
    filename = "OD1271_0x50012da8L_SpirePhotoLargeScan_PLCK_SZ_G004.5-19.6-1_destriped_PMW.fits"

    # Load the data
    fits_data = fits.getdata(path + filename, "image")

    # Create masked array from fits_data
    m_array = create_marray(fits_data)

    # Make the Mexican hat kernel and convolve
    # The values given to the kernels should be 1.7328, 2.3963, 3.5270
    # for S, M and L.
    mex = MexicanHat2DKernel(2.3963)
    # Zero-sum kernel: normalization must stay off in recent astropy
    w = convolve(fits_data, mex, boundary='extend',
                 normalize_kernel=False, nan_treatment='fill')
    m_mex = ma.masked_array(w, m_array.mask)
    #c_mex = np.multiply(w, m_array.mask)

    # Make the gaussian kernel and convolve
    gauss = Gaussian2DKernel(3)  # stddev in pixels
    z = convolve(fits_data, gauss, boundary='extend')
    m_gauss = ma.masked_array(z, m_array.mask)
    #c_gauss = np.multiply(z, m_array.mask)

    # Plot the figures; the min and the max values are reset in some of them
    # for visualization purposes.
    #pixels = None
    #fig, main_axes = plt.subplots(2,2)
    #main_axes[0][0].imshow(mex, origin="lower", interpolation="None")
    #main_axes[0][0].set_title("MexHat kernel")

    #main_axes[0][1].imshow(m_mex, vmin=-0.1, vmax=0.05,\
    #                       origin="lower", interpolation="None")
    #main_axes[0][1].set_title("Convolution with MexHat")

    #main_axes[1][0].imshow(m_gauss, origin="lower", interpolation="None")
    #main_axes[1][0].set_title("Convolution with gaussian")

    #main_axes[1][1].imshow(fits_data, origin="lower", interpolation="None")
    #main_axes[1][1].set_title("Data")

    #plt.show()

    mean_data = (np.nanmax(fits_data) - np.nanmin(fits_data)) / 2  # half the data range
    print("Mean", mean_data, "\nMax", np.nanmax(fits_data),
          "\nMin", np.nanmin(fits_data))

    ## Plotting the histogram
    print "Histogram part"
    mex_1d = fits_data.ravel()
    print np.amax(mex_1d)
    histo = mex_1d[np.nonzero(mex_1d)]
    indices = histo < 0
    histo[indices] = 0
    indices = histo > 0.004
    histo[indices] = 0
    print histo
    #bin_values, bin_boundaries = np.histogram(mex_1d[~np.isnan(mex_1d)], 200)
    plt.hist(histo, 200)
    plt.show()

    #ma.compressed()
    #header = fits_data
    #print header
    return 0
Example #13
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.convolution import Gaussian2DKernel, MexicanHat2DKernel, convolve
import os

# Variables for the filename
path = "/home/abeelen/Herschel/DDT_mustdo_5/PLCK_SZ_G004.5-19.6-1/"
filename = "OD1271_0x50012da8L_SpirePhotoLargeScan_PLCK_SZ_G004.5-19.6-1_destriped_PMW.fits"

# Load the data
fits_data = fits.getdata(path + filename, "image")

# Make the Mexican hat filter
mex = MexicanHat2DKernel(2, x_size=5, y_size=5)
# Zero-sum kernel: normalization must stay off; boundary must be None, not 'None'
z = convolve(fits_data, mex, boundary=None,
             normalize_kernel=False, nan_treatment='fill')

# Make the gaussian filter
#gauss = Gaussian2DKernel(stddev=2)
#z = convolve(fits_data, gauss, boundary='extend')

# Plot the figure
pixels = None
fig, main_axes = plt.subplots()
#main_axes.imshow(fits_data, origin="lower", interpolation="None")
main_axes.imshow(z, origin="lower", interpolation="none")
plt.show()

#header = fits_data
#print header
Example #14
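# Assumed context for this fragment (not shown in the snippet): numpy as np,
# matplotlib.pyplot as p, astropy.convolution.MexicanHat2DKernel, and a
# TurbuStat-style Mexican_hat wavelet object providing psi and psi_ft.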
# 256x256 grid
y = np.arange(-128, 128)
x = np.arange(-128, 128)

k = np.fft.fftfreq(256)
l = np.fft.fftfreq(256)

width = 10.

turb_hat = Mexican_hat()
psi = turb_hat.psi(y / width, x / width) / (2 * np.pi * width**4.)
psi_ft = turb_hat.psi_ft(2 * np.pi * width * k,
                         2 * np.pi * width * l) / (2 * np.pi * width**4.)

apy_hat = MexicanHat2DKernel(10, x_size=256, y_size=256)

# p.ioff()

# p.subplot(131)
# p.title("Astropy")
# p.imshow(apy_hat)
# p.subplot(132)
# p.title("Turb")
# p.imshow(psi)
# p.subplot(133)
# p.title("Difference")
# p.imshow(apy_hat.array - psi)
# p.show()

p.subplot(131)
Example #15
    def compute_transform(self, show_progress=True, scale_normalization=True,
                          keep_convolved_arrays=False, convolve_kwargs={},
                          use_pyfftw=False, threads=1, pyfftw_kwargs={}):
        '''
        Compute the wavelet transform at each scale.

        Parameters
        ----------
        show_progress : bool, optional
            Show a progress bar while the transform is computed at each
            scale.
        scale_normalization : bool, optional
            Compute the transform with the correct scale-invariant
            normalization.
        keep_convolved_arrays : bool, optional
            Keep the image convolved at all wavelet scales. For large images,
            this can require a large amount of memory. Default is False.
        convolve_kwargs : dict, optional
            Passed to `~astropy.convolution.convolve_fft`.
        use_pyfftw : bool, optional
            Use pyfftw for the FFTs, if it is installed.
        threads : int, optional
            Number of threads to use in the FFT when using pyfftw.
        pyfftw_kwargs : dict, optional
            Passed to `pyfftw.builders`. See `here <http://hgomersall.github.io/pyFFTW/pyfftw/builders/builders.html>`_
            for a list of accepted kwargs.
        '''

        if use_pyfftw:
            if PYFFTW_FLAG:
                use_fftn = fftn
                use_ifftn = ifftn
            else:
                warn("pyfftw not installed. Using numpy.fft functions.")
                use_fftn = np.fft.fftn
                use_ifftn = np.fft.ifftn
        else:
            use_fftn = np.fft.fftn
            use_ifftn = np.fft.ifftn

        n0, m0 = self.data.shape
        A = len(self.scales)

        if keep_convolved_arrays:
            self._Wf = np.zeros((A, n0, m0), dtype=float)
        else:
            self._Wf = None

        self._values = np.empty_like(self.scales.value)
        self._stddev = np.empty_like(self.scales.value)

        factor = 2
        if not scale_normalization:
            factor = 4
            Warning("Transform values are only reliable with the proper scale"
                    " normalization. When disabled, the slope of the transform"
                    " CANNOT be used for physical interpretation.")

        pix_scales = self._to_pixel(self.scales).value

        if show_progress:
            bar = ProgressBar(len(pix_scales))

        for i, an in enumerate(pix_scales):
            psi = MexicanHat2DKernel(an)

            conv_arr = \
                convolve_fft(self.data, psi, normalize_kernel=False,
                             fftn=use_fftn, ifftn=use_ifftn,
                             nan_treatment='fill',
                             preserve_nan=True,
                             **convolve_kwargs).real * \
                an**factor

            if keep_convolved_arrays:
                self._Wf[i] = conv_arr

            self._values[i] = (conv_arr[conv_arr > 0]).mean()

            # The standard deviation should take into account the number of
            # kernel elements at that scale.
            kern_area = np.ceil(0.5 * np.pi * np.log(2) * an**2).astype(int)
            nindep = np.sqrt(np.isfinite(conv_arr).sum() // kern_area)

            self._stddev[i] = (conv_arr[conv_arr > 0]).std() / nindep

            if show_progress:
                bar.update(i + 1)
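
The per-scale uncertainty computed in the last few lines is the standard deviation of the positive transform values, reduced by the square root of the number of independent kernel-sized regions in the map:

\sigma_i = \frac{\mathrm{std}(W_i > 0)}{\sqrt{N_{\mathrm{finite}} / A_{\mathrm{kern}}}},
\qquad A_{\mathrm{kern}} = \left\lceil \tfrac{\pi \ln 2}{2}\, a_i^{2} \right\rceil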