Example No. 1
 def test_1d_mean(self):
     """Test 1D array with func=np.mean."""
     data = np.arange(4)
     block_size = 2.
     expected = block_reduce(data, block_size, func=np.sum) / block_size
     result_mean = block_reduce(data, block_size, func=np.mean)
     assert np.all(result_mean == expected)
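A minimal, self-contained sketch of the identity this test exercises, assuming astropy is installed: reducing with func=np.mean equals reducing with func=np.sum divided by the block size.

import numpy as np
from astropy.nddata import block_reduce

data = np.arange(4)                            # [0, 1, 2, 3]
summed = block_reduce(data, 2, func=np.sum)    # [1, 5]
meaned = block_reduce(data, 2, func=np.mean)   # [0.5, 2.5]
assert np.allclose(meaned, summed / 2)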
Example No. 3
 def test_2d_mean(self):
     """Test 2D array with func=np.mean."""
     data = np.arange(4).reshape(2, 2)
     block_size = 2.
     expected = (block_reduce(data, block_size, func=np.sum) /
                 block_size**2)
     result = block_reduce(data, block_size, func=np.mean)
     assert np.all(result == expected)
Example No. 5
    def test_2d_trim(self):
        """
        Test trimming of 2D array when size is not perfectly divisible
        by block_size.
        """

        data1 = np.arange(15).reshape(5, 3)
        result1 = block_reduce(data1, 2)
        data2 = data1[0:4, 0:2]
        result2 = block_reduce(data2, 2)
        assert np.all(result1 == result2)
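For reference, a sketch of the trimming behaviour under test, assuming astropy's block_reduce: the 5 x 3 input is trimmed to 4 x 2 before the 2 x 2 blocks are summed.

import numpy as np
from astropy.nddata import block_reduce

data = np.arange(15).reshape(5, 3)
result = block_reduce(data, 2)    # only data[:4, :2] contributes
assert result.shape == (2, 1)
assert np.all(result == [[8], [32]])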
Example No. 7
def Percentile_resolution(mapa, L, rho, PP, weights=None):

    N = len(mapa)
    dx = L / N
    res = list((10**np.linspace(0, np.log10(N), 100) + 1e-5))
    res.sort()
    res = np.array(res).astype(int)
    res = np.array(list(set(res)))
    res.sort()

    res = np.array(list(set((N / res).astype(int))))
    res.sort()
    Per = np.zeros_like(res, dtype=float)

    if weights is None:
        for k, R in enumerate(res):
            fake = block_reduce(mapa, R, func=np.sum)
            N1 = len(fake)
            radial = radial_map(fake)
            fake = fake.ravel()
            radial = radial.ravel() * L / N1
            try:
                fake = fake[radial < rho]
                Per[k] = np.percentile(fake, PP)
            except Exception:
                Per[k] = 0
    else:
        for k, R in enumerate(res):
            fake = block_reduce(mapa, R, func=np.sum)
            fake2 = block_reduce(mapa, R, func=np.sum)
            N1 = len(fake)
            radial = radial_map(fake)
            fake = fake.ravel()
            fake2 = fake2.ravel()
            radial = radial.ravel() * L / N1
            try:
                field = fake[radial < rho]
                mass = fake2[radial < rho]

                mass = mass[field.argsort()]
                field = field[field.argsort()]
                field = np.insert(field, 0, field[-1])
                mass = np.insert(mass, 0, 0)
                mass = np.cumsum(mass)
                mass /= mass[-1]
                fm = interp1d(mass, field)
                Per[k] = fm(PP * 0.01)
            except Exception:
                Per[k] = 0

    return res, Per, dx
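The resolution array built at the top of Percentile_resolution recurs almost verbatim in the functions below; it reduces to this small helper, a sketch assuming a square map of side N.

import numpy as np

def resolution_factors(N, samples=100):
    """Unique integer block sizes, roughly log-spaced between 1 and N."""
    res = np.unique((10**np.linspace(0, np.log10(N), samples) + 1e-5).astype(int))
    # Express each sampled resolution as an integer block size N // res
    return np.unique((N / res).astype(int))

print(resolution_factors(1024))   # ascending factors from 1 up to 1024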
Example No. 8
def Circulation_block_example(name):
    print(name)
    temp=name.split('/')[-1]
    temp=temp.split('.npy')[0]
    mapa=np.load(name)
    N=len(mapa)
    res=list((10**np.linspace(0,np.log10(N),100)+1e-5))
    res.sort()
    res=np.array(res).astype(int)
    res=np.array(list(set(res)))
    res.sort()

    res=np.array(list(set((N/res).astype(int))))
    res.sort()
    for R in [res[0]]:
        if os.path.isfile('Difussion/Block-'+temp+'-%05d'%R):

            print('File already exists')  #Skip code if file already exists

        else:
            original=np.load(name)
            fake=block_reduce(original, R, func=np.sum)
            del original
            Nf=len(fake)
            tabla=Table()
            tabla['circulation'] = Column(np.array(fake.ravel()),  description='vorticity')
            tabla.write('Difussion/Block-'+temp+'-%05d' %R,path='data',format='hdf5')
            del tabla
    return True
Example No. 9
def Circulation_sigma(mapa):

    N=len(mapa)
    res=list((10**np.linspace(0,np.log10(N),100)+1e-5))
    res.sort()
    res=np.array(res).astype(int)
    res=np.array(list(set(res)))
    res.sort()

    res=np.array(list(set((N/res).astype(int))))
    res.sort()
    Negative=np.zeros_like(res,dtype=float)

    L=np.zeros_like(res,dtype=float)

    for k,R in enumerate(res):
        fake=block_reduce(mapa, R, func=np.sum)
        N1=len(fake)
        L[k]=N1
        radial=radial_map(fake)
        fake=fake.ravel()
        radial=radial.ravel()*4.0e4/N1
        try:
            fake=fake[radial<1.5e4]
            Negative[k]=np.percentile(fake,84)-np.percentile(fake,16)
        except Exception:
            Negative[k]=0
    Negative[np.isnan(Negative)]=0


    return res, Negative, L
Example No. 10
def Circulation_Negative(mapa):

    N=len(mapa)
    res=list((10**np.linspace(0,np.log10(N),100)+1e-5))
    res.sort()
    res=np.array(res).astype(int)
    res=np.array(list(set(res)))
    res.sort()

    res=np.array(list(set((N/res).astype(int))))
    res.sort()
    Negative=np.zeros_like(res,dtype=float)


    for k,R in enumerate(res):
        fake=block_reduce(mapa, R, func=np.sum)
        N1=len(fake)
        radial=radial_map(fake)
        fake=fake.ravel()
        radial=radial.ravel()*4.0e4/N1
        try:
            fake=fake[radial<1.5e4]
            Negative[k]=len(fake[fake<0])/len(fake)
        except Exception:
            Negative[k]=0


    return res, Negative
Example No. 11
def Circulation_Percentiles(mapa):

    N=len(mapa)
    res=list((10**np.linspace(0,np.log10(N),100)+1e-5))
    res.sort()
    res=np.array(res).astype(int)
    res=np.array(list(set(res)))
    res.sort()

    res=np.array(list(set((N/res).astype(int))))
    res.sort()
    P16=np.zeros_like(res,dtype=float)
    P25=np.zeros_like(res,dtype=float)
    P50=np.zeros_like(res,dtype=float)
    P75=np.zeros_like(res,dtype=float)
    P84=np.zeros_like(res,dtype=float)

    for k,R in enumerate(res):
        fake=block_reduce(mapa, R, func=np.sum)
        fake=fake.ravel()/R**2
        P16[k]=np.percentile(fake,16)
        P25[k]=np.percentile(fake,25)
        P50[k]=np.percentile(fake,50)
        P75[k]=np.percentile(fake,75)
        P84[k]=np.percentile(fake,84)

    return res,P16,P25,P50,P75,P84
Example No. 12
    def set_data(self, label, data, inplace_ok=False):

        if 'clim' not in self.volumes[label]:
            raise ValueError("set_clim should be called before set_data")

        # Avoid adding the same data again
        if 'data' in self.volumes[
                label] and self.volumes[label]['data'] is data:
            return

        # Since outside this class we sometimes need to already copy the data
        # before passing it here, we allow the caller to specify inplace_ok=True
        # which means that it's ok to do the limits scaling in-place to avoid
        # another copy.

        if inplace_ok and data.dtype != np.float32:
            raise TypeError('data should be float32 if inplace_ok is set')

        # VisPy can't handle dimensions larger than 2048 so we need to reduce
        # the array on-the-fly if needed

        shape = np.asarray(data.shape)

        if np.any(shape > 2048):
            if self._initial_shape:
                self._block_size = np.ceil(shape / 2048).astype(int)
                data = block_reduce(data, self._block_size, func=np.mean)

        self.volumes[label]['data'] = data
        self.volumes[label]['inplace_ok'] = inplace_ok
        self._update_scaled_data(label)
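A sketch of the block-size arithmetic used above; the 2048 limit comes from the VisPy constraint noted in the comment, and the shape here is hypothetical.

import numpy as np

shape = np.asarray((4096, 1024, 3000))
block_size = np.ceil(shape / 2048).astype(int)   # -> [2, 1, 2]
reduced_shape = shape // block_size              # -> [2048, 1024, 1500]
assert np.all(reduced_shape <= 2048)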
Example No. 13
def Percentile_profiles(mapa,PP):

    N=len(mapa)
    res=list((10**np.linspace(0,np.log10(N),100)+1e-5))
    res.sort()
    res=np.array(res).astype(int)
    res=np.array(list(set(res)))
    res.sort()

    res=np.array(list(set((N/res).astype(int))))
    res.sort()

    N1=len(res)
    N2=len(PP)
    Per=np.zeros((N1,N2),dtype=float)


    for k,R in enumerate(res):
        fake=block_reduce(mapa, R, func=np.sum)/R**2
        N1=len(fake)
        fake=fake.ravel()
        for j in range(N2):
            try:
                Per[k][j]=np.percentile(fake,PP[j])
            except Exception:
                Per[k][j]=0


    return res, Per
Example No. 14
def ndarray_to_png(x, min_percent=20, max_percent=99.5):
    shape = np.array(x.shape)
    # Reverse order for reasons I do not understand...
    shape = shape[::-1]
    if len(shape) != 2:
        return

    width = 600  # pixels
    downsample = (shape[0] // width) + 1

    if downsample > 1:
        x = block_reduce(x,
                         block_size=(downsample, downsample))

    norm = simple_norm(x,
                       min_percent=min_percent,
                       max_percent=max_percent,
                       clip=True)

    x = norm(x)
    # Replace NaNs with black pixels
    x = np.nan_to_num(x)
    img_buffer = BytesIO()
    mimg.imsave(img_buffer, x, format='png', cmap='gray')
    return img_buffer.getvalue()
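A hypothetical usage sketch, assuming the function above is in scope together with its imports (numpy, BytesIO, simple_norm, and matplotlib.image as mimg):

import numpy as np

img = np.random.default_rng(0).normal(size=(1800, 1800))
png_bytes = ndarray_to_png(img)   # block-reduced to at most ~600 px wide
# with open('preview.png', 'wb') as f:
#     f.write(png_bytes)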
Example No. 15
def Negative_profile(mapa):

    N=len(mapa)
    res=list((10**np.linspace(0,np.log10(N),100)+1e-5))
    res.sort()
    res=np.array(res).astype(int)
    res=np.array(list(set(res)))
    res.sort()

    res=np.array(list(set((N/res).astype(int))))
    res.sort()
    Negative=np.zeros_like(res,dtype=float)
    Total=np.zeros_like(res,dtype=float)


    for k,R in enumerate(res):
        fake=block_reduce(mapa, R, func=np.sum)
        N1=len(fake)
        fake=fake.ravel()
        try:
            Negative[k]=len(fake[fake<0])/len(fake)
        except Exception:
            Negative[k]=0
        Total[k]=len(fake)


    return res, Negative,Total
Example No. 16
def Percentile_profile(mapa,PP):

    N=len(mapa)
    res=list((10**np.linspace(0,np.log10(N),100)+1e-5))
    res.sort()
    res=np.array(res).astype(int)
    res=np.array(list(set(res)))
    res.sort()

    res=np.array(list(set((N/res).astype(int))))
    res.sort()
    Per=np.zeros_like(res,dtype=float)


    for k,R in enumerate(res):
        fake=block_reduce(mapa, R, func=np.sum)
        N1=len(fake)
        fake=fake.ravel()
        try:

            Per[k]=np.percentile(fake,PP)
        except Exception:
            Per[k]=0


    return res, Per
Example No. 17
def Sum_resolution(mapa, L, rho):

    N = len(mapa)
    res = list((10**np.linspace(0, np.log10(N), 100) + 1e-5))
    res.sort()
    res = np.array(res).astype(int)
    res = np.array(list(set(res)))
    res.sort()
    dx = L / N
    res = np.array(list(set((N / res).astype(int))))
    res.sort()
    Sum = np.zeros_like(res, dtype=float)

    for k, R in enumerate(res):
        fake = block_reduce(mapa, R, func=np.sum)
        N1 = len(fake)
        radial = radial_map(fake)
        fake = fake.ravel()
        radial = radial.ravel() * L / N1
        try:
            fake = fake[radial < rho]
            Sum[k] = np.abs(fake).sum()
        except Exception:
            Sum[k] = 0

    return res, Sum
Example No. 18
    def set_data(self, label, data):

        if 'clim' not in self.volumes[label]:
            raise ValueError("set_clim should be called before set_data")

        # Get rid of NaN values
        data = np.nan_to_num(data)

        # VisPy can't handle dimensions larger than 2048 so we need to reduce
        # the array on-the-fly if needed

        shape = np.asarray(data.shape)

        if np.any(shape > 2048):
            if self._initial_shape:
                self._block_size = np.ceil(shape / 2048).astype(int)
                data = block_reduce(data, self._block_size, func=np.mean)

        self.volumes[label]['data'] = data
        self._update_scaled_data(label)
Example No. 20
def ndarray_to_png(x, min_percent=20, max_percent=99.5):
    shape = np.array(x.shape)
    # Reverse order for reasons I do not understand...
    shape = shape[::-1]
    if len(shape) != 2:
        return

    width = 600  # pixels
    downsample = (shape[0] // width) + 1
    scaled_data = scale_image(x,
                              min_percent=min_percent,
                              max_percent=max_percent)

    if downsample > 1:
        scaled_data = block_reduce(scaled_data,
                                   block_size=(downsample, downsample))

    img_buffer = BytesIO()
    mimg.imsave(img_buffer, scaled_data, format='png', cmap='gray')
    return img_buffer.getvalue()
Example No. 21
def ndarray_to_png(x, min_percent=20, max_percent=99.5):
    shape = np.array(x.shape)
    # Reverse order for reasons I do not understand...
    shape = shape[::-1]
    if len(shape) != 2:
        return

    width = 600  # pixels
    downsample = (shape[0] // width) + 1

    if downsample > 1:
        x = block_reduce(x,
                         block_size=(downsample, downsample))

    norm = simple_norm(x,
                       min_percent=min_percent,
                       max_percent=max_percent)
    img_buffer = BytesIO()
    mimg.imsave(img_buffer, norm(x), format='png', cmap='gray')
    return img_buffer.getvalue()
Example No. 22
def Block_Negative_radii(mapa, Nbins):
    N = len(mapa)
    res = list((10**np.linspace(0, np.log10(N), 100) + 1e-5))
    res.sort()
    res = np.array(res).astype(int)
    res = np.array(list(set(res)))
    res.sort()

    res = np.array(list(set((N / res).astype(int))))
    res.sort()

    radial = radial_map(mapa)
    radial = radial.ravel()

    if Nbins > 1:
        #redges = np.linspace(0,0.5*N,Nbins)
        #redges = np.insert(redges,len(redges),radial.max())
        redges = np.linspace(0, 0.5 * N * 0.75, Nbins + 1)
    else:
        #redges=np.array([0,radial.max()])
        redges = np.array([0, 0.5 * N * 0.75])
    rcen = 0.5 * (redges[:-1] + redges[1:])

    Negative = np.zeros((Nbins, len(res)))
    for k, R in enumerate(res):
        fake = block_reduce(mapa, R, func=np.sum)
        N1 = len(fake)
        radial = radial_map(fake)
        fake = fake.ravel()
        radial = radial.ravel() * N / N1

        for j in range(Nbins):
            ring = (redges[j] < radial) & (redges[j + 1] > radial)
            temp = fake[ring]
            try:
                Negative[j][k] = len(temp[temp < 0]) / len(temp)
            except Exception:
                Negative[j][k] = 0.0
    return res, rcen, Negative, redges
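A sketch of the ring selection inside the loop above, with np.hypot standing in for radial_map (an assumption; radial_map is not shown in this example):

import numpy as np

yy, xx = np.indices((64, 64))
radial = np.hypot(yy - 31.5, xx - 31.5)   # stand-in for radial_map(fake)
redges = np.linspace(0, 24, 4)            # Nbins = 3 rings
ring = (redges[0] < radial) & (redges[1] > radial)
print(ring.sum(), 'pixels fall in the innermost ring')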
Example No. 23
def sigma_resolution(mapa):
    N=len(mapa)
    res=list((10**np.linspace(0,np.log10(N),100)+1e-5))
    res.sort()
    res=np.array(res).astype(int)
    res=np.array(list(set(res)))
    res.sort()

    res=np.array(list(set((N/res).astype(int))))
    res.sort()
    Sig=np.zeros_like(res,dtype=float)
    eta=2*0.6744897501

    for k,R in enumerate(res):
        fake=block_reduce(mapa, R, func=np.sum)
        fake=fake.ravel()
        try:
            #Sig[k]=(np.percentile(fake,75)-np.percentile(fake,25))/eta
            Sig[k]=np.std(fake)
        except Exception:
            Sig[k]=0

    return res, Sig
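The constant eta = 2 * 0.6744897501 above is the interquartile range of a standard normal distribution, so the commented-out line is a robust sigma estimate. A quick numerical check:

import numpy as np

eta = 2 * 0.6744897501
x = np.random.default_rng(1).normal(0.0, 3.0, 100_000)
robust_sigma = (np.percentile(x, 75) - np.percentile(x, 25)) / eta
print(robust_sigma)   # close to the true sigma of 3.0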
Example No. 24
 def test_2d(self):
     """Test 2D array."""
     data = np.arange(4).reshape(2, 2)
     expected = np.array([[6]])
     result = block_reduce(data, 2)
     assert np.all(result == expected)
Example No. 25
 def test_block_size_len(self):
     """Test block_size length."""
     data = np.ones((2, 2))
     with pytest.raises(ValueError):
         block_reduce(data, (2, 2, 2))
Example No. 26
def add_noise(infile,
              rms,
              mean=0,
              nu=0,
              outfile='',
              bin_img=1,
              bin_spec=1,
              flip_lr=False,
              flip_ud=False,
              log_scale=False,
              overwrite=False,
              verbose=False):
    """
	Function used to add gaussian noise to an image.
	rms must be given in K
	Noise can be added either to single images or cubes.
	"""
    from astropy.nddata.utils import block_reduce

    fname = sys._getframe().f_code.co_name
    start_time = time.time()

    # Detects whether input data comes from a file or an array
    if isinstance(infile, str):
        input_from_file = True
        header = fits.getheader(infile)
        data = fits.getdata(infile)

    elif isinstance(infile, (list, np.ndarray)):
        input_from_file = False
        data = np.array(infile)
        # Ensure data has at least one dimension
        data = np.array(data, ndmin=1)

    if bin_img < 1:
        bin_img = 1
    if bin_spec < 1:
        bin_spec = 1

    # Rescale header elements by binning factors
    if input_from_file:
        header['CDELT1'] *= bin_img
        header['CRPIX1'] /= bin_img
        header['CDELT2'] *= bin_img
        header['CRPIX2'] /= bin_img
        if 'CDELT3' in header:
            header['CDELT3'] *= bin_spec
            header['CRPIX3'] /= bin_spec

    # Bin the spectrum and image if required
    if bin_spec > 1 or bin_img > 1:
        print_(f"Original cube shape: {np.shape(data)}",
               fname,
               verbose=verbose)

        if data.ndim == 3:
            # bin spec
            if bin_spec > 1:
                data = block_reduce(data, [bin_spec, 1, 1], func=np.nanmean)
            if bin_img > 1:
                data = block_reduce(data, [1, bin_img, bin_img],
                                    func=np.nanmean)

        else:
            raise Exception(
                "[!] Input data does not have three dimensions, impossible to detect spectral axis."
            )

        print_(f"Binned cube shape: {np.shape(data)}", fname, verbose=verbose)

    # Validate rms parameter type
    if not isinstance(rms, tuple):
        raise ValueError(
            'Not a valid type for rms. Must be a tuple (val, "unit").')

    if rms[1].upper() != 'K':
        raise ValueError("rms must be in kelvin. Example: rms=(1,'K').")

    print_("Adding noise level of: %e [K] " % (rms[0]), fname, verbose=verbose)

    noisy_data = []

    print_("Adding Gaussian noise ...", fname, verbose=verbose)

    # Loop over the cube
    for idx, channel in enumerate(data):

        # Add gaussian noise to the image
        noise = np.random.normal(mean, rms[0], np.shape(channel))
        c = channel + noise

        # Flip image
        if flip_lr:
            c = np.fliplr(c)
        if flip_ud:
            c = np.flipud(c)
        if log_scale:
            c = np.log10(c)

        noisy_data.append(c)

    # Write data to fits file if required
    write_fits(outfile, noisy_data, header, overwrite, fname, verbose)

    # Print the time taken by the function
    elapsed_time(time.time() - start_time, fname, verbose)

    return np.array(noisy_data)
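A hypothetical call sketch ('cube.fits' is a placeholder path, and the helpers print_, write_fits and elapsed_time are assumed importable): rms must be a (value, 'K') tuple, and spatial binning requires a 3D cube.

noisy = add_noise('cube.fits', rms=(0.1, 'K'), bin_img=2,
                  outfile='cube_noisy.fits', overwrite=True, verbose=True)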
Example No. 27
def lacosmic(data,
             contrast,
             cr_threshold,
             neighbor_threshold,
             error=None,
             mask=None,
             background=None,
             effective_gain=None,
             readnoise=None,
             maxiter=4,
             border_mode='mirror'):
    """
    Remove cosmic rays from an astronomical image using the `L.A.Cosmic
    <http://www.astro.yale.edu/dokkum/lacosmic/>`_ algorithm.  The
    algorithm is based on Laplacian edge detection and is described in
    `PASP 113, 1420 (2001)`_.

    .. _PASP 113, 1420 (2001):
        http://adsabs.harvard.edu/abs/2001PASP..113.1420V

    Parameters
    ----------
    data : array_like
        The 2D array of the image.

    contrast : float
        Contrast threshold between the Laplacian image and the
        fine-structure image.  If your image is critically sampled, use
        a value around 2.  If your image is undersampled (e.g. HST
        data), a value of 4 or 5 (or more) is more appropriate.  If your
        image is oversampled, use a value between 1 and 2.  For details,
        please see `PASP 113, 1420 (2001)`_, which calls this parameter
        :math:`f_{\\mbox{lim}}`.  In particular, Figure 4 shows the
        approximate relationship between the ``contrast`` parameter and
        the pixel full-width half-maximum of stars in your image.

    cr_threshold : float
        The Laplacian signal-to-noise ratio threshold for cosmic-ray
        detection.

    neighbor_threshold : float
        The Laplacian signal-to-noise ratio threshold for detection of
        cosmic rays in pixels neighboring the initially-identified
        cosmic rays.

    error : array_like, optional
        The pixel-wise Gaussian 1-sigma errors of the input ``data``.
        If ``error`` is not input, then ``effective_gain`` and
        ``readnoise`` will be used to construct an approximate model of
        the ``error``.  If ``error`` is input, it will override the
        ``effective_gain`` and ``readnoise`` parameters.  ``error`` must
        have the same shape as ``data``.

    mask : array_like (bool), optional
        A boolean mask, with the same shape as ``data``, where a `True`
        value indicates the corresponding element of ``data`` is masked.
        Masked pixels are ignored when identifying cosmic rays.  It is
        highly recommended that saturated stars be included in ``mask``.

    background : float or array_like, optional
        The background level previously subtracted from the input
        ``data``.  ``background`` may either be a scalar value or a 2D
        image with the same shape as the input ``data``.  If the input
        ``data`` has not been background-subtracted, then set
        ``background=None`` (default).

    effective_gain : float, array-like, optional
        Ratio of counts (e.g., electrons or photons) to the units of
        ``data``.  For example, if your input ``data`` are in units of
        ADU, then ``effective_gain`` should represent electrons/ADU.  If
        your input ``data`` are in units of electrons/s then
        ``effective_gain`` should be the exposure time (or an exposure
        time map).  ``effective_gain`` and ``readnoise`` must be
        specified if ``error`` is not input.

    readnoise : float, optional
        The read noise (in electrons) in the input ``data``.
        ``effective_gain`` and ``readnoise`` must be specified if
        ``error`` is not input.

    maxiter : float, optional
        The maximum number of iterations.  The default is 4.  The
        routine will automatically exit if no additional cosmic rays are
        identified.  If the routine is still identifying cosmic rays
        after four iterations, then you are likely digging into sources
        (e.g. saturated stars) and/or the noise.  In that case, try
        inputing a ``mask`` or increasing the value of ``cr_threshold``.

    border_mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        The mode in which the array borders are handled during
        convolution and median filtering.  For 'constant', the value is
        0.  The default is 'mirror', which matches the original
        L.A.Cosmic algorithm.

    Returns
    -------
    cleaned_image : `~numpy.ndarray`
        The cosmic-ray cleaned image.

    crmask : `~numpy.ndarray` (bool)
        A mask image of the identified cosmic rays.  Cosmic-ray pixels
        have a value of `True`.
    """

    from scipy import ndimage
    block_size = 2.0
    kernel = np.array([[0.0, -1.0, 0.0], [-1.0, 4.0, -1.0], [0.0, -1.0, 0.0]])

    clean_data = data.copy()
    if background is not None:
        clean_data += background
    final_crmask = np.zeros(data.shape, dtype=bool)

    if error is not None:
        if data.shape != error.shape:
            raise ValueError('error and data must have the same shape')
    clean_error_image = error

    ncosmics, ncosmics_tot = 0, 0
    for iteration in range(maxiter):
        sampled_img = block_replicate(clean_data, block_size)
        convolved_img = ndimage.convolve(sampled_img, kernel,
                                         mode=border_mode).clip(min=0.0)
        laplacian_img = block_reduce(convolved_img, block_size)

        if clean_error_image is None:
            if effective_gain is None or readnoise is None:
                raise AssertionError('effective_gain and readnoise must be '
                                     'input if error is not input')
            med5_img = ndimage.median_filter(clean_data,
                                             size=5,
                                             mode=border_mode).clip(min=1.e-5)
            error_image = (np.sqrt(effective_gain * med5_img + readnoise**2) /
                           effective_gain)
        else:
            error_image = clean_error_image

        snr_img = laplacian_img / (block_size * error_image)
        # this is used to remove extended structures (larger than ~5x5)
        snr_img -= ndimage.median_filter(snr_img, size=5, mode=border_mode)

        # used to remove compact bright objects
        med3_img = ndimage.median_filter(clean_data, size=3, mode=border_mode)
        med7_img = ndimage.median_filter(med3_img, size=7, mode=border_mode)
        finestruct_img = ((med3_img - med7_img) / error_image).clip(min=0.01)

        cr_mask1 = snr_img > cr_threshold
        # NOTE: to follow the paper exactly, this condition should be
        # "> contrast * block_size".  "lacos_im.cl" uses simply "> contrast"
        cr_mask2 = (snr_img / finestruct_img) > contrast
        cr_mask = cr_mask1 * cr_mask2
        if mask is not None:
            cr_mask = np.logical_and(cr_mask, ~mask)

        # grow cosmic rays by one pixel and check in snr_img
        selem = np.ones((3, 3))
        neigh_mask = ndimage.binary_dilation(cr_mask, selem)
        cr_mask = cr_mask1 * neigh_mask
        # now grow one more pixel and lower the detection threshold
        neigh_mask = ndimage.binary_dilation(cr_mask, selem)
        cr_mask = (snr_img > neighbor_threshold) * neigh_mask

        # previously unknown cosmic rays found in this iteration
        crmask_new = np.logical_and(~final_crmask, cr_mask)
        ncosmics = np.count_nonzero(crmask_new)

        final_crmask = np.logical_or(final_crmask, cr_mask)
        ncosmics_tot += ncosmics
        log.info('Iteration {0}: Found {1} cosmic-ray pixels, '
                 'Total: {2}'.format(iteration + 1, ncosmics, ncosmics_tot))
        if ncosmics == 0:
            if background is not None:
                clean_data -= background
            return clean_data, final_crmask
        clean_data = _clean_masked_pixels(clean_data,
                                          final_crmask,
                                          size=5,
                                          exclude_mask=mask)

    if background is not None:
        clean_data -= background
    return clean_data, final_crmask
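A hypothetical usage sketch of the function above (assuming its module-level helpers block_replicate, _clean_masked_pixels and log are in scope): without an error map, effective_gain and readnoise are required so the routine can model the noise itself.

import numpy as np

image = np.random.default_rng(2).normal(100.0, 5.0, (256, 256))
image[100, 100] += 5000.0   # inject a fake cosmic-ray hit
cleaned, crmask = lacosmic(image, contrast=2.0, cr_threshold=5.0,
                           neighbor_threshold=3.0,
                           effective_gain=1.0, readnoise=5.0)
print(crmask.sum(), 'cosmic-ray pixels flagged')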
Example No. 28
def make_lens_sys(lensmodel, src_camera_kwargs, source_info, kwargs_band_src,
                  lens_info):
    #Model
    kwargs_model_postit = {
        'lens_model_list': ['SIE'],
        'source_light_model_list': ['INTERPOL']
    }
    kwargs_lens = lensmodel['kwargs_lens_list']  # SIE model

    #data
    numpix = len(source_info['image']) * source_info['HR_factor']
    kwargs_source_mag = [{
        'magnitude':
        source_info['magnitude'],
        'image':
        source_info['image'],
        'scale':
        source_info['deltapix'] / source_info['HR_factor'],
        'phi_G':
        0,
        'center_x':
        lensmodel['source_shift'][0],
        'center_y':
        lensmodel['source_shift'][1]
    }]  #phi_G is to rotate, centers are to shift in arcsecs
    sim = SimAPI(numpix=numpix,
                 kwargs_single_band=kwargs_band_src,
                 kwargs_model=kwargs_model_postit)
    kwargs_numerics = {'supersampling_factor': source_info['HR_factor']}
    imSim = sim.image_model_class(kwargs_numerics)
    _, kwargs_source, _ = sim.magnitude2amplitude(
        kwargs_source_mag=kwargs_source_mag)

    #simulation
    image_HD = imSim.image(kwargs_lens=kwargs_lens,
                           kwargs_source=kwargs_source)

    mag_LS = -2.5 * np.log10(sum(sum(image_HD))) + source_info['zero_point']
    magnification = source_info[
        'magnitude'] - mag_LS  #rough estimate of the magnitude change

    #DES - PSF convolution
    npsf = inter_psf(lens_info['psf'], lens_info['deltapix'],
                     source_info['deltapix'])
    source_conv = signal.fftconvolve(image_HD, npsf, mode='same')

    #resize in deltapix
    source_lensed_res = block_reduce(
        source_conv, source_info['HR_factor'])  #in case an HR_factor was used
    eq_pa = int(
        lens_info['deltapix'] * len(lens_info['image']) *
        len(source_lensed_res) /
        (source_info['deltapix'] * len(source_info['image']))
    )  #this value is to consider a source image of the same size in arcseconds as the lens
    bs = np.zeros([eq_pa, eq_pa])
    val = int((len(bs) - len(source_lensed_res)) / 2)
    bs[val:val + len(source_lensed_res),
       val:val + len(source_lensed_res)] = source_lensed_res
    source_size_scale = block_reduce(bs,
                                     int(len(bs) / len(lens_info['image'])))

    #flux rescale
    flux_img = sum(image_HD.flatten()) * (10**(
        0.4 * (lens_info['zero_point'] - source_info['zero_point'])))
    sc = sum(source_conv.flatten()) / flux_img
    source_scaled = source_size_scale / sc

    #cut right pixels size
    lens_size = min(np.shape(lens_info['image']))
    source_size = min(np.shape(source_scaled))

    if lens_size > source_size:
        xin = yin = int((lens_size - source_size) / 2)
        lens_final = lens_info['image'][xin:xin + source_size,
                                        yin:yin + source_size]
        source_final = source_scaled

    elif lens_size < source_size:
        xin = yin = math.ceil((source_size - lens_size) / 2)
        source_final = source_scaled[xin:xin + lens_size, yin:yin + lens_size]
        lens_final = lens_info['image']
    else:
        lens_final = lens_info['image']
        source_final = source_scaled
    final = lens_final + source_final

    phot_ap = 2
    _, mag_sim = ap_phot(
        len(final) / 2,
        len(final) / 2, final, phot_ap / (lens_info['deltapix']),
        phot_ap / (lens_info['deltapix']), np.pi / 2., lens_info['zero_point'])

    #in the old code some images cause problems with some inf pixels... this will discard that system
    if np.isinf(magnification):
        print('INFINITE MAGNIFICATION')
        magnification = 10000

    return {
        'simulation': final,
        'src_image': source_info['image'],
        'mag_sim': mag_sim,
        'mag_lensed_src': mag_LS,
        'image_HD': image_HD,
        'resize': source_size_scale,
        'conv': source_conv,
        'magnification': magnification
    }
Example No. 29
 plt.savefig('/scratch/dw1519/galex/data/star_photon/' + outdir +
             '/flat_in.pdf',
             dpi=190)
 plt.clf()
 scans = [
     '0023', '0212', '0302', '0464', '0500', '0689', '0806', '0941', '1247',
     '1508', '1778', '2120', '2291', '2453', '2759', '2930', '3155', '3227',
     '3326', '3497'
 ]
 date = '08-21-2017'  #'08-21-2017'
 data = np.zeros((size, size))
 flat = np.load('/scratch/dw1519/galex/data/star_photon/' + outdir +
                '/flat0.npy')
 data = np.load('/scratch/dw1519/galex/data/star_photon/' + outdir +
                '/data.npy')
 data = block_reduce(data, resample)
 N = 10
 for i in range(1, N):
     model = np.zeros((size, size))
     A_list = []
     for scan in scans:
         with open('../name_scan/%s' % scan) as f:
             name_list = f.read().splitlines()
         print(name_list)
         if i == 0:
             data += np.load(
                 '/scratch/dw1519/galex/data/star_photon/back/' + scan +
                 '_data.npy')
         exp = np.load('/scratch/dw1519/galex/data/star_photon/back/' +
                       scan + '_exp.npy')
         if len(exp.shape) < 2:
Example No. 30
def lacosmic(data, contrast, cr_threshold, neighbor_threshold,
             error=None, mask=None, background=None, effective_gain=None,
             readnoise=None, maxiter=4, border_mode='mirror'):
    """
    Remove cosmic rays from an astronomical image using the `L.A.Cosmic
    <http://www.astro.yale.edu/dokkum/lacosmic/>`_ algorithm.  The
    algorithm is based on Laplacian edge detection and is described in
    `PASP 113, 1420 (2001)`_.

    .. _PASP 113, 1420 (2001):
        http://adsabs.harvard.edu/abs/2001PASP..113.1420V

    Parameters
    ----------
    data : array_like
        The 2D array of the image.

    contrast : float
        Contrast threshold between the Laplacian image and the
        fine-structure image.  If your image is critically sampled, use
        a value around 2.  If your image is undersampled (e.g. HST
        data), a value of 4 or 5 (or more) is more appropriate.  If your
        image is oversampled, use a value between 1 and 2.  For details,
        please see `PASP 113, 1420 (2001)`_, which calls this parameter
        :math:`f_{\\mbox{lim}}`.  In particular, Figure 4 shows the
        approximate relationship between the ``contrast`` parameter and
        the pixel full-width half-maximum of stars in your image.

    cr_threshold : float
        The Laplacian signal-to-noise ratio threshold for cosmic-ray
        detection.

    neighbor_threshold : float
        The Laplacian signal-to-noise ratio threshold for detection of
        cosmic rays in pixels neighboring the initially-identified
        cosmic rays.

    error : array_like, optional
        The pixel-wise Gaussian 1-sigma errors of the input ``data``.
        If ``error`` is not input, then ``effective_gain`` and
        ``readnoise`` will be used to construct an approximate model of
        the ``error``.  If ``error`` is input, it will override the
        ``effective_gain`` and ``readnoise`` parameters.  ``error`` must
        have the same shape as ``data``.

    mask : array_like (bool), optional
        A boolean mask, with the same shape as ``data``, where a `True`
        value indicates the corresponding element of ``data`` is masked.
        Masked pixels are ignored when identifying cosmic rays.  It is
        highly recommended that saturated stars be included in ``mask``.

    background : float or array_like, optional
        The background level previously subtracted from the input
        ``data``.  ``background`` may either be a scalar value or a 2D
        image with the same shape as the input ``data``.  If the input
        ``data`` has not been background-subtracted, then set
        ``background=None`` (default).

    effective_gain : float, array-like, optional
        Ratio of counts (e.g., electrons or photons) to the units of
        ``data``.  For example, if your input ``data`` are in units of
        ADU, then ``effective_gain`` should represent electrons/ADU.  If
        your input ``data`` are in units of electrons/s then
        ``effective_gain`` should be the exposure time (or an exposure
        time map).  ``effective_gain`` and ``readnoise`` must be
        specified if ``error`` is not input.

    readnoise : float, optional
        The read noise (in electrons) in the input ``data``.
        ``effective_gain`` and ``readnoise`` must be specified if
        ``error`` is not input.

    maxiter : float, optional
        The maximum number of iterations.  The default is 4.  The
        routine will automatically exit if no additional cosmic rays are
        identified.  If the routine is still identifying cosmic rays
        after four iterations, then you are likely digging into sources
        (e.g. saturated stars) and/or the noise.  In that case, try
        inputing a ``mask`` or increasing the value of ``cr_threshold``.

    border_mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        The mode in which the array borders are handled during
        convolution and median filtering.  For 'constant', the value is
        0.  The default is 'mirror', which matches the original
        L.A.Cosmic algorithm.

    Returns
    -------
    cleaned_image : `~numpy.ndarray`
        The cosmic-ray cleaned image.

    crmask : `~numpy.ndarray` (bool)
        A mask image of the identified cosmic rays.  Cosmic-ray pixels
        have a value of `True`.
    """

    block_size = 2.0
    kernel = np.array([[0.0, -1.0, 0.0], [-1.0, 4.0, -1.0], [0.0, -1.0, 0.0]])

    clean_data = data.copy()
    if background is not None:
        clean_data += background
    final_crmask = np.zeros(data.shape, dtype=bool)

    if error is not None:
        if data.shape != error.shape:
            raise ValueError('error and data must have the same shape')
    clean_error_image = error

    ncosmics, ncosmics_tot = 0, 0
    for iteration in range(maxiter):
        sampled_img = block_replicate(clean_data, block_size)
        convolved_img = ndimage.convolve(sampled_img, kernel,
                                         mode=border_mode).clip(min=0.0)
        laplacian_img = block_reduce(convolved_img, block_size)

        if clean_error_image is None:
            if effective_gain is None or readnoise is None:
                raise ValueError('effective_gain and readnoise must be '
                                 'input if error is not input')
            med5_img = ndimage.median_filter(clean_data, size=5,
                                             mode=border_mode).clip(min=1.e-5)
            error_image = (np.sqrt(effective_gain*med5_img + readnoise**2) /
                           effective_gain)
        else:
            error_image = clean_error_image

        snr_img = laplacian_img / (block_size * error_image)
        # this is used to remove extended structures (larger than ~5x5)
        snr_img -= ndimage.median_filter(snr_img, size=5, mode=border_mode)

        # used to remove compact bright objects
        med3_img = ndimage.median_filter(clean_data, size=3, mode=border_mode)
        med7_img = ndimage.median_filter(med3_img, size=7, mode=border_mode)
        finestruct_img = ((med3_img - med7_img) / error_image).clip(min=0.01)

        cr_mask1 = snr_img > cr_threshold
        # NOTE: to follow the paper exactly, this condition should be
        # "> contrast * block_size".  "lacos_im.cl" uses simply "> contrast"
        cr_mask2 = (snr_img / finestruct_img) > contrast
        cr_mask = cr_mask1 * cr_mask2
        if mask is not None:
            cr_mask = np.logical_and(cr_mask, ~mask)

        # grow cosmic rays by one pixel and check in snr_img
        selem = np.ones((3, 3))
        neigh_mask = ndimage.binary_dilation(cr_mask, selem)
        cr_mask = cr_mask1 * neigh_mask
        # now grow one more pixel and lower the detection threshold
        neigh_mask = ndimage.binary_dilation(cr_mask, selem)
        cr_mask = (snr_img > neighbor_threshold) * neigh_mask

        # previously unknown cosmic rays found in this iteration
        crmask_new = np.logical_and(~final_crmask, cr_mask)
        ncosmics = np.count_nonzero(crmask_new)

        final_crmask = np.logical_or(final_crmask, cr_mask)
        ncosmics_tot += ncosmics
        log.info('Iteration {0}: Found {1} cosmic-ray pixels, '
                 'Total: {2}'.format(iteration + 1, ncosmics, ncosmics_tot))
        if ncosmics == 0:
            if background is not None:
                clean_data -= background
            return clean_data, final_crmask
        clean_data = _clean_masked_pixels(clean_data, final_crmask, size=5,
                                          exclude_mask=mask)

    if background is not None:
        clean_data -= background
    return clean_data, final_crmask
Example No. 31
 def test_block_size_broadcasting(self):
     """Test scalar block_size broadcasting."""
     data = np.arange(16).reshape(4, 4)
     result1 = block_reduce(data, 2)
     result2 = block_reduce(data, (2, 2))
     assert np.all(result1 == result2)
Example No. 32
def show_image(image,
               percl=99,
               percu=None,
               is_mask=False,
               figsize=(6, 10),
               cmap='viridis',
               log=False,
               show_colorbar=True,
               show_ticks=True,
               fig=None,
               ax=None,
               input_ratio=None):
    """
    Show an image in matplotlib with some basic astronomically-appropriate stretching.

    Parameters
    ----------
    image
        The image to show
    percl : number
        The percentile for the lower edge of the stretch (or both edges if ``percu`` is None)
    percu : number or None
        The percentile for the upper edge of the stretch (or None to use ``percl`` for both)
    figsize : 2-tuple
        The size of the matplotlib figure in inches
    """
    if percu is None:
        percu = percl
        percl = 100 - percl

    if (fig is None and ax is not None) or (fig is not None and ax is None):
        raise ValueError('Must provide both "fig" and "ax" '
                         'if you provide one of them')
    elif fig is None and ax is None:
        fig, ax = plt.subplots(1, 1, figsize=figsize)
        if figsize is not None:
            # Rescale the fig size to match the image dimensions, roughly
            image_aspect_ratio = image.shape[0] / image.shape[1]
            figsize = (max(figsize) * image_aspect_ratio, max(figsize))
            print(figsize)

    # To preserve details we should *really* downsample correctly and
    # not rely on matplotlib to do it correctly for us (it won't).

    # So, calculate the size of the figure in pixels, block_reduce to
    # roughly that, and display the block-reduced image.

    # Thanks, https://stackoverflow.com/questions/29702424/how-to-get-matplotlib-figure-size
    fig_size_pix = fig.get_size_inches() * fig.dpi

    ratio = (image.shape // fig_size_pix).max()

    if ratio < 1:
        ratio = 1

    ratio = input_ratio or ratio

    # Divide by the square of the ratio to keep the flux the same in the
    # reduced image
    reduced_data = block_reduce(image, ratio) / ratio**2

    # Of course, now that we have downsampled, the axis limits are changed to
    # match the smaller image size. Setting the extent will do the trick to
    # change the axis display back to showing the actual extent of the image.
    extent = [0, image.shape[1], 0, image.shape[0]]

    if log:
        stretch = aviz.LogStretch()
    else:
        stretch = aviz.LinearStretch()

    norm = aviz.ImageNormalize(reduced_data,
                               interval=aviz.AsymmetricPercentileInterval(
                                   percl, percu),
                               stretch=stretch)

    if is_mask:
        # The image is a mask in which pixels are zero or one. Set the image scale
        # limits appropriately.
        scale_args = dict(vmin=0, vmax=1)
    else:
        scale_args = dict(norm=norm)

    im = ax.imshow(reduced_data,
                   origin='lower',
                   cmap=cmap,
                   extent=extent,
                   aspect='equal',
                   **scale_args)

    if show_colorbar:
        # I haven't a clue why the fraction and pad arguments below work to make
        # the colorbar the same height as the image, but they do....unless the image
        # is wider than it is tall. Sticking with this for now anyway...
        # Thanks: https://stackoverflow.com/a/26720422/3486425
        fig.colorbar(im, ax=ax, fraction=0.046, pad=0.04, format='%2.0f')
        # In case someone in the future wants to improve this:
        # https://joseph-long.com/writing/colorbars/
        # https://stackoverflow.com/a/33505522/3486425
        # https://matplotlib.org/mpl_toolkits/axes_grid/users/overview.html#colorbar-whose-height-or-width-in-sync-with-the-master-axes

    if not show_ticks:
        ax.tick_params(labelbottom=False,
                       labelleft=False,
                       labelright=False,
                       labeltop=False)
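A hypothetical usage sketch, assuming matplotlib.pyplot and astropy.visualization are imported as plt and aviz (as the function body assumes): the image is block_reduce'd to roughly the on-screen pixel count, with flux preserved by the /ratio**2 term.

import numpy as np

big_image = np.random.default_rng(3).normal(1000.0, 50.0, (4000, 4000))
show_image(big_image, percl=99, log=True, figsize=(6, 6))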
Example No. 34
sigma_v = (p75 - p25) / eta
"""
Creating the resolution array which are integers factors
"""
res = list((10**np.linspace(0, np.log10(N), 100) + 1e-5))
res.sort()
res = np.array(res).astype(int)
res = np.array(list(set(res)))
res.sort()
res = np.array(list(set((N / res).astype(int))))
res.sort()

stds = np.zeros_like(res, dtype=float)
serr = np.zeros_like(res, dtype=float)
for k, R in enumerate(res):
    fake = block_reduce(difference_map, R, func=np.sum) / R**2
    lf = len(fake.ravel())
    stds[k] = np.std(fake.ravel())
stds[-1] = 4 * stds[-2]
serr = 1.5 * stds * res / res.max()
sve = 0.25 * kms_to_pcmyr * 2 * DX / res**1.5
stot = np.sqrt(sve**2 + serr**2)
stot = np.maximum(stot, 0.2 * stds)

resolutions = ''
sigma_array = ''
error_array = ''
for sa, re, se in zip(stds, res * DX, stot):
    sigma_array += ' ' + str(sa)
    resolutions += ' ' + str(re)
    error_array += ' ' + str(se)
Example No. 35
 frac = cut/full
 frac[np.isnan(frac)] = 0.
 flat = frac*flat
 print(flat.shape)
 np.save('/scratch/dw1519/galex/data/star_photon/'+outdir+'/flat_in.npy', flat)
 plt.imshow(flat, vmin=0, vmax=1.5)
 plt.colorbar()
 plt.savefig('/scratch/dw1519/galex/data/star_photon/'+outdir+'/flat_in.pdf', dpi=190)
 plt.clf()
 scans = ['0023', '0212', '0302', '0464', '0500', '0689', '0806', '0941', '1247', '1508', '1778', 
         '2120', '2291', '2453','2759', '2930', '3155', '3227', '3326', '3497']
 date = '08-21-2017'#'08-21-2017'
 data = np.zeros((size,size))
 flat = np.load('/scratch/dw1519/galex/data/star_photon/'+outdir+'/flat2.npy')
 data = np.load('/scratch/dw1519/galex/data/star_photon/'+outdir+'/data.npy')
 data = block_reduce(data, resample)
 N=10
 for i in range(3, N):
     model = np.zeros((size,size))
     A_list = []
     for scan in scans:
         with open('../name_scan/%s'%scan) as f:
             name_list = f.read().splitlines()
          print(name_list)
         if i==0:
             data += np.load('/scratch/dw1519/galex/data/star_photon/back/'+scan+'_data.npy')
         exp = np.load('/scratch/dw1519/galex/data/star_photon/back/'+scan+'_exp.npy')
         if len(exp.shape)<2:
             exp = np.concatenate(exp,axis=0)
          print(exp.shape)
         star_list = np.load('/scratch/dw1519/galex/data/star_photon/back/'+scan+'_bstar.npy')
Example No. 36
def lacosmic(data, contrast, cr_threshold, neighbor_threshold,
             error=None, mask=None, background=None, effective_gain=None,
             readnoise=None, maxiter=4, border_mode='mirror'):

    block_size = 2.0
    kernel = np.array([[0.0, -1.0, 0.0], [-1.0, 4.0, -1.0], [0.0, -1.0, 0.0]])

    clean_data = data.copy()
    if background is not None:
        clean_data += background
    final_crmask = np.zeros(data.shape, dtype=bool)

    if error is not None:
        if data.shape != error.shape:
            raise ValueError('error and data must have the same shape')
    clean_error_image = error

    ncosmics, ncosmics_tot = 0, 0
    for iteration in range(maxiter):
        sampled_img = block_replicate(clean_data, block_size)
        convolved_img = ndimage.convolve(sampled_img, kernel,
                                         mode=border_mode).clip(min=0.0)
        laplacian_img = block_reduce(convolved_img, block_size)

        if clean_error_image is None:
            if effective_gain is None or readnoise is None:
                raise ValueError('effective_gain and readnoise must be '
                                 'input if error is not input')
            med5_img = ndimage.median_filter(clean_data, size=5,
                                             mode=border_mode).clip(min=1.e-5)
            error_image = (np.sqrt(effective_gain*med5_img + readnoise**2) /
                           effective_gain)
        else:
            error_image = clean_error_image

        snr_img = laplacian_img / (block_size * error_image)
        # this is used to remove extended structures (larger than ~5x5)
        snr_img -= ndimage.median_filter(snr_img, size=5, mode=border_mode)

        # used to remove compact bright objects
        med3_img = ndimage.median_filter(clean_data, size=3, mode=border_mode)
        med7_img = ndimage.median_filter(med3_img, size=7, mode=border_mode)
        finestruct_img = ((med3_img - med7_img) / error_image).clip(min=0.01)

        cr_mask1 = snr_img > cr_threshold
        # NOTE: to follow the paper exactly, this condition should be
        # "> contrast * block_size".  "lacos_im.cl" uses simply "> contrast"
        cr_mask2 = (snr_img / finestruct_img) > contrast
        cr_mask = cr_mask1 * cr_mask2
        if mask is not None:
            cr_mask = np.logical_and(cr_mask, ~mask)

        # grow cosmic rays by one pixel and check in snr_img
        selem = np.ones((3, 3))
        neigh_mask = ndimage.binary_dilation(cr_mask, selem)
        cr_mask = cr_mask1 * neigh_mask
        # now grow one more pixel and lower the detection threshold
        neigh_mask = ndimage.binary_dilation(cr_mask, selem)
        cr_mask = (snr_img > neighbor_threshold) * neigh_mask

        # previously unknown cosmic rays found in this iteration
        crmask_new = np.logical_and(~final_crmask, cr_mask)
        ncosmics = np.count_nonzero(crmask_new)

        final_crmask = np.logical_or(final_crmask, cr_mask)
        ncosmics_tot += ncosmics
        if ncosmics_tot > 0:
            log.info('Iteration {0}: Found {1} cosmic-ray pixels, '
                     'Total: {2}'.format(iteration + 1, ncosmics, ncosmics_tot))
    cr_image = final_crmask.astype(int) * 255
    return cr_image, ncosmics_tot
Example No. 37
def apply_beam(infile,
               fwhm,
               pa=0,
               nu=0,
               outfile='',
               bin_img=1,
               bin_spec=1,
               eff=1,
               telescope='APEX',
               pixel_size=None,
               output_Tmb=False,
               flip_lr=False,
               flip_ud=False,
               log_scale=False,
               overwrite=False,
               verbose=False):
    """
	Function used to convolve images by single-beams.
	This does not apply for interferometers, in such case, use simobserve instead.

	Bin the image if required (i.e. bin_size!=0).
	Perform convolution with a gaussian beam
	Note: is better to implement convolve_fft() instead of 
	convolve() since it works faster for large kernels (n>500)
	Also rescale intensities from Jy/px to Jy/beam by the (1.331*(fwhm**2)/(pixel_size**2)) factor
	"""
    from astropy.convolution import Gaussian2DKernel, convolve, convolve_fft
    from astropy.nddata.utils import block_reduce

    fname = sys._getframe().f_code.co_name
    start_time = time.time()

    # Detects whether input data comes from a file or an array
    if type(infile) == str:
        header = fits.getheader(infile)
        data = fits.getdata(infile)
        input_from_file = True

    elif isinstance(infile, (list, np.ndarray)):
        input_from_file = False
        data = np.array(infile)
        # Ensure data has at least one dimension
        data = np.array(data, ndmin=1)

    else:
        input_from_file = False

    # Assure binning factors are not negative
    bin_img = 1 if bin_img < 1 else bin_img
    bin_spec = 1 if bin_spec < 1 else bin_spec

    # Rescale header elements by binning factors
    if input_from_file:
        header['CDELT1'] *= bin_img
        header['CRPIX1'] /= bin_img
        header['CDELT2'] *= bin_img
        header['CRPIX2'] /= bin_img
        if 'CDELT3' in header:
            header['CDELT3'] *= bin_spec
            header['CRPIX3'] /= bin_spec

    # Bin the spectrum and image if required
    if bin_spec > 1 or bin_img > 1:
        print_(f"Original cube shape: {np.shape(data)}",
               fname,
               verbose=verbose)

        if data.ndim == 3:
            data = block_reduce(data, [bin_spec, 1, 1],
                                func=np.nanmean) if bin_spec > 1 else data
            data = block_reduce(data, [1, bin_img, bin_img],
                                func=np.nanmean) if bin_img > 1 else data

        else:
            raise Exception(
                "[!] Input data does not have three dimensions, impossible to detect spectral axis."
            )

        print_(f"Binned cube shape: {np.shape(data)}", fname, verbose=verbose)

    # Obtain the pixel size in arcseconds. Function argument pixel_size (in arcsec) overrides all other options.
    if pixel_size in [0, None]:

        # Read from the header
        if input_from_file:
            pixel_size = np.float64(np.abs(header['CDELT1'])) * (u.deg).to(
                u.arcsec)

        # Ask the user to enter it
        else:
            pixel_size = float(input("Enter pixel size in arcsec: "))

    # Convert fwhm to numpy array. If a scalar, use it both as bmin and bmaj.
    fwhm = np.array(fwhm) if np.size(fwhm) > 1 else np.array([fwhm, fwhm])

    # Convert FWHM["] --> FWHM[px]
    fwhm_pix = fwhm / pixel_size
    fwhm_to_std = np.sqrt(8 * np.log(2))
    sigma = fwhm_pix / fwhm_to_std

    # Create the Gaussian Kernel
    kernel = Gaussian2DKernel(x_stddev=sigma[0],
                              y_stddev=sigma[1],
                              theta=pa * u.deg.to(u.rad)).array

    print_(f'Data shape: {np.shape(data)}', fname, verbose=verbose)
    print_(f'Kernel shape: {np.shape(kernel)}', fname, verbose=verbose)
    print_(f'Convolving ...', fname, verbose=verbose)

    convolved_data = []

    # Loop over the cube
    for idx, channel in enumerate(data):

        # Use the Fast Fourier Transform only for arrays of side length larger than 500.
        convolver_string = "Not using FFT ..." if len(
            channel) < 500 else "Using FFT ..."
        if idx == 0: print_(convolver_string, fname, verbose=verbose)

        # Convolve the image.
        c = convolve(channel, kernel) if len(channel) < 500 else convolve_fft(
            channel, kernel)

        # Rescale intensity (Jy/px) --> (Jy/beam)
        # Rescaling factor: Output_beam_area / Input_beam_area
        # Output beam area = area of a Gaussian beam
        # Area of a Gaussian beam: 2*pi/(8*ln(2)) * (FWHM_maj*FWHM_min) = 1.133 * FWHM**2

        rescaling_factor = 1.1331 * (fwhm[0] * fwhm[1]) / (pixel_size**2)

        c = c * rescaling_factor

        # Convert intensities (Jy/beam) to brightness temperature (K)
        Tmb = brightness_temperature(c, unit='Jy/beam', nu=nu,
                                     fwhm=fwhm) if output_Tmb else c

        # Rescale by the telescope efficiency if indicated
        if eff != 1: Tmb = (1 / eff) * Tmb

        # Flip left-right if indicated
        if flip_lr: Tmb = np.fliplr(Tmb)

        # Flip upside-down if indicated
        if flip_ud: Tmb = np.flipud(Tmb)

        # Convert to logscale if indicated
        if log_scale: Tmb = np.log10(Tmb)

        convolved_data.append(Tmb)

    # Write additional keywords to header
    if input_from_file:
        header['BTYPE'] = 'Tmb' if output_Tmb else 'intensity'
        header['BUNIT'] = 'K' if output_Tmb else 'Jy/beam'
        header['BMIN'] = fwhm[0] * (u.arcsec).to(u.deg)
        header['BMAJ'] = fwhm[1] * (u.arcsec).to(u.deg)
        header['BPA'] = pa
        header['TELESCOP'] = str(telescope)

    # Write data to fits file if required
    write_fits(outfile, convolved_data, header, overwrite, fname, verbose)

    # Print the time taken by the function
    elapsed_time(time.time() - start_time, fname, verbose)

    return np.array(convolved_data)
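A sketch of the Jy/px to Jy/beam rescaling used above: the factor is the Gaussian beam area, 2*pi/(8*ln 2) * bmaj * bmin, which is approximately 1.1331 * FWHM**2 for a circular beam, divided by the pixel area (the values here are hypothetical).

import numpy as np

fwhm_arcsec, pixel_arcsec = 10.0, 2.0
factor = (np.pi / (4 * np.log(2))) * fwhm_arcsec**2 / pixel_arcsec**2
print(factor)   # ~= 1.1331 * (10 / 2)**2 ~= 28.3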
Example No. 38
 def test_1d(self):
     """Test 1D array."""
     data = np.arange(4)
     expected = np.array([1, 5])
     result = block_reduce(data, 2)
     assert np.all(result == expected)
Example No. 41
        name_list = ['0023', '0032', '0203', '0239', '0446', '0464', '0473', '0806', '0815', '1301', '1310', '1319',\
                    '1616', '1634', '1679', '2174', '2183', '2192', '2714', '2750', '3236', '3245', '3281']
        name_list = ['2561']
        name_list = ['0050']
        date='11-07-2017'
        date1='08-21-2017/0001-0500'
        factor = 4
        for name in name_list:
            #name = re.split('/', name)[3]
            if name == 'name2444-2480':
                continue
            print(name)
            count = pyfits.open('../fits/scan_map/'+date1+'/count_map_%s_count.fits'%name)
            exp = pyfits.open('../fits/scan_map/'+date+'/count_map_%s_exp.fits'%name)

            cm = block_reduce(count[0].data, factor)
            em = block_reduce(exp[0].data, factor, np.mean)

            w = WCS('../fits/scan_map/'+date1+'/count_map_%s_count.fits'%name)
            wn = get_resampled_wcs(w,factor,True)
            header = wn.to_header()

            hdu = pyfits.PrimaryHDU(em)
            hdu.header = count[0].header
            hdu.header['NAXIS1'] = em.shape[1]#header['NAXIS1']
            hdu.header['NAXIS2'] = em.shape[0]#header['NAXIS2']
            hdu.header['CRPIX1'] = header['CRPIX1']
            hdu.header['CRPIX2'] = header['CRPIX2']
            hdu.header['CDELT1'] = header['CDELT1']
            hdu.header['CDELT2'] = header['CDELT2']
            hdulist = pyfits.HDUList([hdu])