Example 1
def boxSmooth(img, width, sigma):
    """Box-smooth an image.  Only the edges of the box are included.

    @param img     The image to smooth
    @param width   The width of the box
    @param sigma   The 'sigma' of the smoothing Gaussian

    @return smth   The smoothed image

    This is a cheap (separable) ring smooth.
    """
    hwidth = width / 2.0
    k = 2 * int(hwidth + 3.0 * sigma) + 1
    kk1 = np.arange(k) - k // 2 + hwidth
    kk2 = np.arange(k) - k // 2 - hwidth
    box1 = (1.0 / np.sqrt(2.0 * np.pi)) * np.exp(-kk1 * kk1 / (2.0 * sigma))
    box2 = (1.0 / np.sqrt(2.0 * np.pi)) * np.exp(-kk2 * kk2 / (2.0 * sigma))
    box = box1 + box2

    w = (kk1 > 0) & (kk2 < 0)
    line = box.copy()
    line[w] = (1.0 / np.sqrt(2.0 * np.pi))

    box /= box.sum()
    line /= line.sum()

    mode = 'reflect'
    out0 = filt.correlate1d(img, box, mode=mode)
    out1 = filt.correlate1d(out0, line, mode=mode, axis=0)
    out2 = filt.correlate1d(img, box, mode=mode, axis=0)
    out3 = filt.correlate1d(out2, line, mode=mode)
    smth = out1 + out3
    return smth
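A minimal usage sketch for the ring smooth above, assuming the function is defined in a module where `np` is numpy and `filt` is `scipy.ndimage`; the width and sigma values are illustrative only.

import numpy as np
from scipy import ndimage as filt  # provides the correlate1d used as filt.correlate1d above

# synthetic image: a single bright pixel, so the output shows the box-edge ("ring") response
img = np.zeros((64, 64))
img[32, 32] = 1.0

smth = boxSmooth(img, width=10, sigma=2.0)
print(smth.shape, smth.sum())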
Example 2
def bandpass(image, lshort, llong, threshold=None, truncate=4):
    """Remove noise and background variation.

    Convolve with a Gaussian to remove short-wavelength noise and subtract out
    long-wavelength variations, retaining features of intermediate scale.

    This implementation relies on scipy.ndimage.filters.gaussian_filter, and it
    is the fastest way known to the authors of performing a bandpass in
    Python.

    Parameters
    ----------
    image : ndarray
    lshort : number or tuple
        Small-scale cutoff (noise).
    llong : number or tuple
        Large-scale cutoff.
        For both lshort and llong, give a tuple for per-dimension sizes or a
        single number to use the same size in every dimension.
        2*lshort must be smaller than llong, otherwise a ValueError is raised.
    threshold : float or integer
        By default, 1 for integer images and 1/256. for float images.

    Returns
    -------
    result : array
        the bandpassed image

    See Also
    --------
    legacy_bandpass, legacy_bandpass_fftw
    """
    lshort = validate_tuple(lshort, image.ndim)
    llong = validate_tuple(llong, image.ndim)
    if np.any([x * 2 >= y for (x, y) in zip(lshort, llong)]):
        raise ValueError("The smoothing length scale must be more "
                         "than twice the noise length scale.")
    if threshold is None:
        if np.issubdtype(image.dtype, np.integer):
            threshold = 1
        else:
            threshold = 1 / 256.
    boxcar = image.copy()
    result = np.array(image, dtype=float)
    for axis, (sigma, smoothing) in enumerate(zip(lshort, llong)):
        if smoothing > 1:
            uniform_filter1d(boxcar,
                             2 * smoothing + 1,
                             axis,
                             output=boxcar,
                             mode='nearest',
                             cval=0)
        if sigma > 0:
            correlate1d(result,
                        gaussian_kernel(sigma, truncate),
                        axis,
                        output=result,
                        mode='constant',
                        cval=0.0)
    result -= boxcar
    return np.where(result > threshold, result, 0)
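For readers without trackpy's helpers (validate_tuple, gaussian_kernel), the same band-pass idea can be sketched with scipy's public n-d filters alone: Gaussian-smooth to suppress pixel noise, boxcar-smooth to estimate the slowly varying background, subtract, and threshold. The parameter values below are illustrative, not trackpy defaults.

import numpy as np
from scipy.ndimage import gaussian_filter, uniform_filter

def simple_bandpass(image, lshort=1.0, llong=11, threshold=1 / 256.):
    """Sketch of the band-pass above using scipy's n-dimensional filters directly."""
    image = np.asarray(image, dtype=float)
    smoothed = gaussian_filter(image, lshort, mode='constant')          # remove short-wavelength noise
    background = uniform_filter(image, 2 * llong + 1, mode='nearest')   # long-wavelength background
    result = smoothed - background
    return np.where(result > threshold, result, 0)

rng = np.random.default_rng(0)
noisy = rng.normal(0.0, 0.05, (128, 128))
noisy[60:68, 60:68] += 1.0          # a feature of intermediate scale
print(simple_bandpass(noisy).max())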
Example 3
def boxSmooth(img, width, sigma):
    """Box-smooth an image.  Only the edges of the box are included.

    @param img     The image to smooth
    @param width   The width of the box
    @param sigma   The 'sigma' of the smoothing Gaussian

    @return smth   The smoothed image

    This is a cheap (separable) ring smooth.
    """
    hwidth = width/2.0
    k     = 2*int(hwidth + 3.0*sigma) + 1
    kk1   = np.arange(k) - k//2 + hwidth
    kk2   = np.arange(k) - k//2 - hwidth
    box1  = (1.0/np.sqrt(2.0*np.pi))*np.exp(-kk1*kk1/(2.0*sigma))
    box2  = (1.0/np.sqrt(2.0*np.pi))*np.exp(-kk2*kk2/(2.0*sigma))
    box   = box1 + box2

    w = (kk1 > 0) & (kk2 < 0)
    line  = box.copy()
    line[w] = (1.0/np.sqrt(2.0*np.pi))

    box /= box.sum()
    line /= line.sum()
    
    mode = 'reflect'
    out0 = filt.correlate1d(img, box, mode=mode)
    out1 = filt.correlate1d(out0, line, mode=mode, axis=0)
    out2 = filt.correlate1d(img, box, mode=mode, axis=0)
    out3 = filt.correlate1d(out2, line, mode=mode)
    smth = out1 + out3
    return smth
Example 4
def lowpass(image, sigma=1, truncate=4):
    """Remove noise by convolving with a Gaussian.

    Convolve with a Gaussian to remove short-wavelength noise.

    The lowpass implementation relies on scipy.ndimage.filters.gaussian_filter,
    and it is the fastest way known to the authors of performing a bandpass in
    Python.

    Parameters
    ----------
    image : ndarray
    sigma : number or tuple, optional
        Size of the gaussian kernel with which the image is convolved.
        Provide a tuple for different sizes per dimension. Default 1.
    truncate : number, optional
        Determines the truncation size of the convolution kernel. Default 4.

    Returns
    -------
    result : array
        the processed image, as float

    See Also
    --------
    bandpass
    """
    sigma = validate_tuple(sigma, image.ndim)
    result = np.array(image, dtype=float)
    for axis, _sigma in enumerate(sigma):
        if _sigma > 0:
            correlate1d(result, gaussian_kernel(_sigma, truncate), axis,
                        output=result, mode='constant', cval=0.0)
    return result
Example 5
def gaussian_filter(input,
                    sigma,
                    order=0,
                    output=None,
                    mode="reflect",
                    cval=0.0,
                    truncate=4.0):
    input = np.asarray(input)
    output = _ni_support._get_output(output, input)
    orders = _ni_support._normalize_sequence(order, input.ndim)
    sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
    modes = _ni_support._normalize_sequence(mode, input.ndim)
    axes = list(range(input.ndim))
    axes = [(axes[ii], sigmas[ii], orders[ii], modes[ii])
            for ii in range(len(axes)) if sigmas[ii] > 1e-15]
    if len(axes) > 0:
        for axis, sigma, order, mode in axes:
            sd = float(sigma)
            # make the radius of the filter equal to truncate standard deviations
            lw = int(truncate * sd + 0.5)
            # Since we are calling correlate, not convolve, revert the kernel
            weights = _gaussian_kernel1d(sigma, order, lw)[::-1]

            if input.ndim == 3 and axis == 0:
                weights[weights.size // 2 + 1:] = 0

            correlate1d(input, weights, axis, output, mode, cval, 0)
            input = output
    else:
        output[...] = input[...]
    return output
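The kernel reversal in the excerpt above only matters for odd derivative orders; for order 0 the Gaussian is symmetric, so a plain correlate1d with the truncated kernel reproduces gaussian_filter1d. A small check using only public scipy APIs:

import numpy as np
from scipy.ndimage import correlate1d, gaussian_filter1d

sigma, truncate = 2.0, 4.0
lw = int(truncate * sigma + 0.5)            # same radius rule as in the excerpt
x = np.arange(-lw, lw + 1)
kernel = np.exp(-0.5 * (x / sigma) ** 2)
kernel /= kernel.sum()

data = np.random.default_rng(1).normal(size=200)
a = correlate1d(data, kernel, mode='reflect')
b = gaussian_filter1d(data, sigma, truncate=truncate, mode='reflect')
print(np.allclose(a, b))                    # True: identical truncated Gaussian smoothing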
Example 6
def _estimateNoiseLevel(imgray):
    """
    Estimates the noise level of the given one-channel image.
    
    This code is adapted from
    http://www.mathworks.com/matlabcentral/fileexchange/36941-fast-noise-estimation-in-images
    
    The original description follows:
    
        by Tolga Birdal
        
        This is an extremely simple m-file which implements the method described
        in : J. Immerkaer, "Fast Noise Variance Estimation", Computer Vision and
        Image Understanding, Vol. 64, No. 2, pp. 300-302, Sep. 1996
                
        The advantage of this method is that it includes a Laplacian operation 
        which is almost insensitive to image structure but only depends on the 
        noise in the image. 
    """    
    h,w = imgray.shape[0], imgray.shape[1]
    
    # compute sum of absolute values of Laplacian
    kernel = np.array([1,-2,1])
    conv = correlate1d(imgray.astype(float), kernel, axis=0)
    conv = correlate1d(conv, kernel, axis=1)
    sigma = np.sum(np.abs(conv))
    
    # scale sigma with proposed coefficients
    sigma = sigma*np.sqrt(0.5*np.pi)/(6*(w-2)*(h-2))
    return sigma
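A quick sanity check of the estimator above on synthetic Gaussian noise of known sigma (a sketch; it assumes _estimateNoiseLevel is defined in a module that imports numpy as np and correlate1d as below):

import numpy as np
from scipy.ndimage import correlate1d

rng = np.random.default_rng(0)
flat = np.full((256, 256), 100.0)
noisy = flat + rng.normal(0.0, 3.0, flat.shape)
print(_estimateNoiseLevel(noisy))   # approximately 3, the sigma of the injected noise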
Example 7
 def gaussian_bur_utils(self, img, kernel=None):
     if kernel is None:
         kernel = self.create_gaussian_kernel()
     temp = np.asarray(img).astype(float)
     temp = correlate1d(temp, kernel, axis=0)
     temp = correlate1d(temp, kernel, axis=1)
     return temp
Example 8
    def getPointSourcePatch(self, px, py, minval=0., modelMask=None, **kwargs):
        from scipy.ndimage.filters import correlate1d
        from astrometry.util.miscutils import get_overlapping_region

        img = self.getImage(px, py)

        H,W = img.shape
        ix = int(np.round(px))
        iy = int(np.round(py))
        dx = px - ix
        dy = py - iy
        x0 = ix - W // 2
        y0 = iy - H // 2

        if modelMask is not None:
            mh,mw = modelMask.shape
            mx0,my0 = modelMask.x0, modelMask.y0

            # print 'PixelizedPSF + modelMask'
            # print 'mx0,my0', mx0,my0, '+ mw,mh', mw,mh
            # print 'PSF image x0,y0', x0,y0, '+ W,H', W,H

            if (mx0 >= x0 + W or
                my0 >= y0 + H or
                mx0 + mw <= x0 or
                my0 + mh <= y0):
                # No overlap
                return None
            # Otherwise, we'll just produce the Lanczos-shifted PSF image as usual,
            # and then copy it into the modelMask space.

        L = self.Lorder
        Lx = lanczos_filter(L, np.arange(-L, L+1) + dx)
        Ly = lanczos_filter(L, np.arange(-L, L+1) + dy)
        # Normalize the Lanczos interpolants (preserve flux)
        Lx /= Lx.sum()
        Ly /= Ly.sum()
        sx      = correlate1d(img, Lx, axis=1, mode='constant')
        shifted = correlate1d(sx,  Ly, axis=0, mode='constant')
        if modelMask is None:
            return Patch(x0, y0, shifted)

        # Pad or clip to modelMask size
        mm = np.zeros((mh,mw), shifted.dtype)
        yo = y0 - my0
        yi = 0
        ny = min(y0+H, my0+mh) - max(y0, my0)
        if yo < 0:
            yi = -yo
            yo = 0
        xo = x0 - mx0
        xi = 0
        nx = min(x0+W, mx0+mw) - max(x0, mx0)
        if xo < 0:
            xi = -xo
            xo = 0
        mm[yo:yo+ny, xo:xo+nx] = shifted[yi:yi+ny, xi:xi+nx]
        return Patch(mx0, my0, mm)
Example 9
    def getPointSourcePatch(self, px, py, minval=0., modelMask=None, **kwargs):
        from scipy.ndimage.filters import correlate1d
        from astrometry.util.miscutils import get_overlapping_region

        img = self.getImage(px, py)

        H, W = img.shape
        ix = int(np.round(px))
        iy = int(np.round(py))
        dx = px - ix
        dy = py - iy
        x0 = ix - W // 2
        y0 = iy - H // 2

        if modelMask is not None:
            mh, mw = modelMask.shape
            mx0, my0 = modelMask.x0, modelMask.y0

            # print 'PixelizedPSF + modelMask'
            # print 'mx0,my0', mx0,my0, '+ mw,mh', mw,mh
            # print 'PSF image x0,y0', x0,y0, '+ W,H', W,H

            if (mx0 >= x0 + W or my0 >= y0 + H or mx0 + mw <= x0
                    or my0 + mh <= y0):
                # No overlap
                return None
            # Otherwise, we'll just produce the Lanczos-shifted PSF image as usual,
            # and then copy it into the modelMask space.

        L = self.Lorder
        Lx = lanczos_filter(L, np.arange(-L, L + 1) + dx)
        Ly = lanczos_filter(L, np.arange(-L, L + 1) + dy)
        # Normalize the Lanczos interpolants (preserve flux)
        Lx /= Lx.sum()
        Ly /= Ly.sum()
        sx = correlate1d(img, Lx, axis=1, mode='constant')
        shifted = correlate1d(sx, Ly, axis=0, mode='constant')
        if modelMask is None:
            return Patch(x0, y0, shifted)

        # Pad or clip to modelMask size
        mm = np.zeros((mh, mw), shifted.dtype)
        yo = y0 - my0
        yi = 0
        ny = min(y0 + H, my0 + mh) - max(y0, my0)
        if yo < 0:
            yi = -yo
            yo = 0
        xo = x0 - mx0
        xi = 0
        nx = min(x0 + W, mx0 + mw) - max(x0, mx0)
        if xo < 0:
            xi = -xo
            xo = 0
        mm[yo:yo + ny, xo:xo + nx] = shifted[yi:yi + ny, xi:xi + nx]
        return Patch(mx0, my0, mm)
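The same Lanczos sub-pixel shift can be sketched without the tractor/astrometry helpers; lanczos() below is a hypothetical stand-in for lanczos_filter, and the shift direction convention is illustrative only.

import numpy as np
from scipy.ndimage import correlate1d

def lanczos(order, x):
    """Hypothetical stand-in for lanczos_filter: sinc(x) * sinc(x / order), zero outside |x| < order."""
    x = np.asarray(x, dtype=float)
    out = np.sinc(x) * np.sinc(x / order)
    out[np.abs(x) >= order] = 0.0
    return out

def subpixel_shift(img, dx, dy, order=3):
    """Shift an image by a sub-pixel offset using separable Lanczos kernels, as in the excerpt."""
    Lx = lanczos(order, np.arange(-order, order + 1) + dx)
    Ly = lanczos(order, np.arange(-order, order + 1) + dy)
    Lx /= Lx.sum()   # normalize to preserve flux
    Ly /= Ly.sum()
    sx = correlate1d(img, Lx, axis=1, mode='constant')
    return correlate1d(sx, Ly, axis=0, mode='constant')

img = np.zeros((15, 15))
img[7, 7] = 1.0
shifted = subpixel_shift(img, 0.3, -0.2)
print(shifted.sum())   # ~1: flux is approximately preserved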
Example 10
 def refine_grid(
         self,
         data = None,
         i0 = None, i1 = None,
         j0 = None, j1 = None,
         k0 = None, k1 = None,
         dorder = [(0, 0, 0)],
         factor = 2):
     """
         meant to be called for regularly spaced data, otherwise results make no sense.
     """
     if (not self.initialized):
         self.initialize()
     beta_vals = np.empty((len(dorder), 3, factor, len(self.bx[0][0])), dtype = data.dtype)
     for o in range(len(dorder)):
         for i in range(factor):
             beta_vals[o, 0, i] = np.array([self.bx[0][dorder[o][0]][k](i*1./factor)
                                            for k in range(len(self.bx[0][0]))])
             beta_vals[o, 1, i] = np.array([self.bx[0][dorder[o][1]][k](i*1./factor)
                                            for k in range(len(self.bx[0][0]))])
             beta_vals[o, 2, i] = np.array([self.bx[0][dorder[o][2]][k](i*1./factor)
                                            for k in range(len(self.bx[0][0]))])
     if len(data.shape) == 3:
         result = np.empty((len(dorder), (k1 - k0)*factor, (j1 - j0)*factor, (i1 - i0)*factor), dtype = data.dtype)
         for cx in range(factor):
             for cy in range(factor):
                 for cz in range(factor):
                     result[:, cz:result.shape[1]:factor, cy:result.shape[2]:factor, cx:result.shape[3]:factor] = sum(sum(sum(
                             data     [None, k0+kk-self.n:k1+kk-self.n, j0+jj-self.n:j1+jj-self.n, i0+ii-self.n:i1+ii-self.n]
                           * beta_vals[   :, 0,     None,        None,        None, cx, ii] for ii in range(len(self.bx[0][0])))
                           * beta_vals[   :, 1,     None,        None,        None, cy, jj] for jj in range(len(self.bx[0][0])))
                           * beta_vals[   :, 2,     None,        None,        None, cz, kk] for kk in range(len(self.bx[0][0])))
     elif len(data.shape) == 4:
         result = np.empty((len(dorder), (k1 - k0)*factor, (j1 - j0)*factor, (i1 - i0)*factor, 3), dtype = data.dtype)
         for cx in range(factor):
             for cy in range(factor):
                 for cz in range(factor):
                     for coord in range(3):
                         for o in range(len(dorder)):
                             tmp = correlate1d(data[:, :, :, coord], np.array(beta_vals[o, 0, cx, :]), axis = 2)
                             tmp = correlate1d(                 tmp, np.array(beta_vals[o, 1, cy, :]), axis = 1)
                             tmp = correlate1d(                 tmp, np.array(beta_vals[o, 2, cz, :]), axis = 0)
                             result[ o,
                                     cz:result.shape[1]:factor,
                                     cy:result.shape[2]:factor,
                                     cx:result.shape[3]:factor,
                                     coord] = tmp[self.n:result.shape[1]+self.n,
                                                  self.n:result.shape[2]+self.n,
                                                  self.n:result.shape[3]+self.n]
                     #result[:, cz:result.shape[1]:factor, cy:result.shape[2]:factor, cx:result.shape[3]:factor] = sum(sum(sum(
                     #        data     [None, k0+kk-self.n:k1+kk-self.n, j0+jj-self.n:j1+jj-self.n, i0+ii-self.n:i1+ii-self.n,            :]
                     #      * beta_vals[   :, 0,     None,        None,        None, cx, ii, None] for ii in range(len(self.bx[0][0])))
                     #      * beta_vals[   :, 1,     None,        None,        None, cy, jj, None] for jj in range(len(self.bx[0][0])))
                     #      * beta_vals[   :, 2,     None,        None,        None, cz, kk, None] for kk in range(len(self.bx[0][0])))
     return result
Example 11
def downsample(imgIn):
    '''
    Downsample an image to half its size after smoothing with a binomial
    kernel (after Burt & Adelson).
    '''
    filter = 1.0 / 16 * numpy.array([1, 4, 6, 4, 1])
    lowpass = filters.correlate1d(imgIn, filter, 0)
    lowpass = filters.correlate1d(lowpass, filter, 1)
    
    sample = lowpass[::2, ::2, ...]
    return sample
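A standalone sketch of the same Burt & Adelson reduce step, used repeatedly to build a small Gaussian pyramid (the sizes and random test image are illustrative):

import numpy as np
from scipy.ndimage import correlate1d

def reduce_once(img):
    """One REDUCE step: binomial (approximately Gaussian) smoothing, then keep every other pixel."""
    kernel = np.array([1, 4, 6, 4, 1]) / 16.0
    low = correlate1d(img, kernel, axis=0)
    low = correlate1d(low, kernel, axis=1)
    return low[::2, ::2]

pyramid = [np.random.default_rng(0).random((64, 64))]
while min(pyramid[-1].shape) > 4:
    pyramid.append(reduce_once(pyramid[-1]))
print([level.shape for level in pyramid])   # (64, 64), (32, 32), (16, 16), (8, 8), (4, 4)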
Example 12
def raytraceX(obj, ps, sys_index, nimgs=None, eps=None):

    #---------------------------------------------------------------------------
    # Find the derivative of the arrival time surface.
    #---------------------------------------------------------------------------
    arrival = obj.basis.arrival_grid(ps)[sys_index]

    w = central_diff_weights(3)
    d = abs(correlate1d(arrival, w, axis=0, mode='constant')) \
      + abs(correlate1d(arrival, w, axis=1, mode='constant'))
    d = d[1:-1,1:-1]

    pl.matshow(d)

    xy = obj.basis.refined_xy_grid(ps)[1:-1,1:-1]

    # Create flattened *views*
    xy     = xy.ravel()
    dravel = d.ravel()

    imgs = []
    offs = []
    print('searching...')
    for i in argsort(dravel):

        if nimgs == len(imgs): break

        new_image = True
        for img in imgs:
            if abs(img-xy[i]) <= eps: 
                new_image = False

        if new_image:
            imgs.append(xy[i])
            offs.append(i)

    #---------------------------------------------------------------------------
    # Print the output
    #---------------------------------------------------------------------------

    if imgs:
        #print imgs
        #if len(imgs) % 2 == 1: imgs = imgs[:-1]  # XXX: Correct?
        imgs = array(imgs)

        g0 = array(arrival[1:-1,1:-1], copy=True)
        g0ravel = g0.ravel()
        times = g0ravel[offs]
        order = argsort(times)
    else:
        order = []

    return [(times[i], imgs[i]) for i in order]
Example 13
 def refine_grid(
         self,
         data = None,
         i0 = None, i1 = None,
         j0 = None, j1 = None,
         k0 = None, k1 = None,
         dorder = [(0, 0, 0)],
         factor = 2):
     """
         meant to be called for regularly spaced data, otherwise results make no sense.
     """
     beta_vals = np.empty((len(dorder), 3, factor, len(self.bx[0][0])), dtype = data.dtype)
     for o in range(len(dorder)):
         for i in range(factor):
             beta_vals[o, 0, i] = np.array([self.bx[0][dorder[o][0]][k](i*1./factor)
                                            for k in range(len(self.bx[0][0]))])
             beta_vals[o, 1, i] = np.array([self.bx[0][dorder[o][1]][k](i*1./factor)
                                            for k in range(len(self.bx[0][0]))])
             beta_vals[o, 2, i] = np.array([self.bx[0][dorder[o][2]][k](i*1./factor)
                                            for k in range(len(self.bx[0][0]))])
     if len(data.shape) == 3:
         result = np.empty((len(dorder), (k1 - k0)*factor, (j1 - j0)*factor, (i1 - i0)*factor), dtype = data.dtype)
         for cx in range(factor):
             for cy in range(factor):
                 for cz in range(factor):
                     result[:, cz:result.shape[1]:factor, cy:result.shape[2]:factor, cx:result.shape[3]:factor] = sum(sum(sum(
                             data     [None, k0+kk-self.n:k1+kk-self.n, j0+jj-self.n:j1+jj-self.n, i0+ii-self.n:i1+ii-self.n]
                           * beta_vals[   :, 0,     None,        None,        None, cx, ii] for ii in range(len(self.bx[0][0])))
                           * beta_vals[   :, 1,     None,        None,        None, cy, jj] for jj in range(len(self.bx[0][0])))
                           * beta_vals[   :, 2,     None,        None,        None, cz, kk] for kk in range(len(self.bx[0][0])))
     elif len(data.shape) == 4:
         result = np.empty((len(dorder), (k1 - k0)*factor, (j1 - j0)*factor, (i1 - i0)*factor, 3), dtype = data.dtype)
         for cx in range(factor):
             for cy in range(factor):
                 for cz in range(factor):
                     for coord in range(3):
                         for o in range(len(dorder)):
                             tmp = correlate1d(data[:, :, :, coord], np.array(beta_vals[o, 0, cx, :]), axis = 2)
                             tmp = correlate1d(                 tmp, np.array(beta_vals[o, 1, cy, :]), axis = 1)
                             tmp = correlate1d(                 tmp, np.array(beta_vals[o, 2, cz, :]), axis = 0)
                             result[ o,
                                     cz:result.shape[1]:factor,
                                     cy:result.shape[2]:factor,
                                     cx:result.shape[3]:factor,
                                     coord] = tmp[self.n:result.shape[1]+self.n,
                                                  self.n:result.shape[2]+self.n,
                                                  self.n:result.shape[3]+self.n]
                     #result[:, cz:result.shape[1]:factor, cy:result.shape[2]:factor, cx:result.shape[3]:factor] = sum(sum(sum(
                     #        data     [None, k0+kk-self.n:k1+kk-self.n, j0+jj-self.n:j1+jj-self.n, i0+ii-self.n:i1+ii-self.n,            :]
                     #      * beta_vals[   :, 0,     None,        None,        None, cx, ii, None] for ii in range(len(self.bx[0][0])))
                     #      * beta_vals[   :, 1,     None,        None,        None, cy, jj, None] for jj in range(len(self.bx[0][0])))
                     #      * beta_vals[   :, 2,     None,        None,        None, cz, kk, None] for kk in range(len(self.bx[0][0])))
     return result
Example 14
def bandpass(image, lshort, llong, threshold=None, truncate=4):
    """Remove noise and background variation.

    Convolve with a Gaussian to remove short-wavelength noise and subtract out
    long-wavelength variations, retaining features of intermediate scale.

    This implementation relies on scipy.ndimage.filters.gaussian_filter, and it
    is the fastest way known to the authors of performing a bandpass in
    Python.

    Parameters
    ----------
    image : ndarray
    lshort : number or tuple
        Small-scale cutoff (noise).
    llong : number or tuple
        Large-scale cutoff.
        For both lshort and llong, give a tuple for per-dimension sizes or a
        single number to use the same size in every dimension.
        2*lshort must be smaller than llong, otherwise a ValueError is raised.
    threshold : float or integer
        By default, 1 for integer images and 1/256. for float images.

    Returns
    -------
    result : array
        the bandpassed image

    See Also
    --------
    legacy_bandpass, legacy_bandpass_fftw
    """
    lshort = validate_tuple(lshort, image.ndim)
    llong = validate_tuple(llong, image.ndim)
    if np.any([x*2 >= y for (x, y) in zip(lshort, llong)]):
        raise ValueError("The smoothing length scale must be more "
                         "than twice the noise length scale.")
    if threshold is None:
        if np.issubdtype(image.dtype, np.integer):
            threshold = 1
        else:
            threshold = 1/256.
    boxcar = image.copy()
    result = np.array(image, dtype=float)
    for axis, (sigma, smoothing) in enumerate(zip(lshort, llong)):
        if smoothing > 1:
            uniform_filter1d(boxcar, 2*smoothing+1, axis, output=boxcar,
                             mode='nearest', cval=0)
        if sigma > 0:
            correlate1d(result, gaussian_kernel(sigma, truncate), axis,
                        output=result, mode='constant', cval=0.0)
    result -= boxcar
    return np.where(result > threshold, result, 0)
Example 15
def lowpass(image, sigma=1, truncate=4):
    sigma = validate_tuple(sigma, image.ndim)  # one sigma per image dimension
    result = np.array(image, dtype=float)  # work on a float copy of the image
    for axis, _sigma in enumerate(sigma):  # convolve a 1-D Gaussian along each axis
        if _sigma > 0:
            correlate1d(result,
                        gaussian_kernel(_sigma, truncate),
                        axis,
                        output=result,
                        mode='constant',
                        cval=0.0)
    return result
Example 16
def upsample(imgIn):
    '''
    Upsample an image to twice its size and interpolate missing pixel
    values by smoothing.
    '''
    h,w = imgIn.shape[0:2]
    sample = numpy.zeros((h*2, w*2) + imgIn.shape[2:], imgIn.dtype)
    sample[::2, ::2, ...] = imgIn
    
    filter = 1.0 / 8 * numpy.array([1, 4, 6, 4, 1])
    lowpass = filters.correlate1d(sample, filter, 0, mode="constant")
    lowpass = filters.correlate1d(lowpass, filter, 1, mode="constant")
    return lowpass
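A round-trip sketch pairing the upsample above with the downsample from Example 11, assuming both functions live in a module with these imports (the `filters` name is aliased to scipy.ndimage to match the examples):

import numpy
from scipy import ndimage as filters

img = numpy.random.default_rng(0).random((32, 32))
up = upsample(img)        # (64, 64): zeros interleaved, then binomial smoothing
down = downsample(up)     # back to (32, 32)
print(up.shape, down.shape)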
Example 17
def separableCrossCorrelate(data, vx, vy):
    """Cross-correlate 2D data with 1D kernels in X and then Y

    @param data    The input 2D ndarray
    @param vx      The x-vector for cross-correlation
    @param vy      The y-vector for cross-correlation

    @return out    The cross-correlated 2D array.
    """

    mode = 'reflect'
    out0 = filt.correlate1d(data, vx, mode=mode)
    out = filt.correlate1d(out0, vy, mode=mode, axis=0)
    return out
Example 18
def separableCrossCorrelate(data, vx, vy):
    """Cross-correlate 2D data with 1D kernels in X and then Y

    @param data    The input 2D ndarray
    @param vx      The x-vector for cross-correlation
    @param vy      The y-vector for cross-correlation

    @return out    The cross-correlated 2D array.
    """
    
    mode = 'reflect'
    out0 = filt.correlate1d(data, vx, mode=mode)
    out  = filt.correlate1d(out0, vy, mode=mode, axis=0)
    return out
Example 19
def Dn(x, y, Np, ndiv=1, axis=-1, mode='strip', cval=0.):
    """ central numerical derivative using Np points of order ndiv
        (Np>1 and odd), using convolution
        Data needs to be equally spaced in x
        can use mode= 'nearest', 'wrap', 'reflect', 'constant'
                      'strip'
                      'strip' will just cut the bad data at the ends
        cval is for 'constant'
        returns x', d^n y/dx^n(x')

        Note the algorithm is not intended to remove noise,
        but to provide a more accurate derivative of a function.
        The larger Np, the more derivatives are available.
        It basically finds the best Taylor series parameters,
        assuming Np points around the center are available:
          assuming f_k = f(xo + k dx),  k=-n .. n, Np=2*n+1
          and with f(x) = f(xo) + f' (x-xo) + f''(x-xo)^2/2 + ....
                        = f(xo) + f' k dx + ((f'' dx**2)/2) k**2 + ...
          we want to solve for (1, f', f'' dx**2/2, ...)
          and we pick the answer for the correct derivative.
    """
    dx = x[1] - x[0]
    kernel = central_diff_weights(Np, ndiv=ndiv)
    strip = False
    if mode == 'strip':
        strip = True
        mode = 'reflect'
    dy = filters.correlate1d(y, kernel, axis=axis, mode=mode, cval=cval)
    D = dy / dx**ndiv
    if strip:
        x, D = _do_strip(x, D, Np, axis=axis)
    return x, D
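A worked check of the idea (a sketch that does not need central_diff_weights or _do_strip): for Np = 3 the central-difference weights are [-1/2, 0, 1/2], so correlating equally spaced samples of sin(x) and dividing by dx approximates cos(x) to second order in dx.

import numpy as np
from scipy.ndimage import correlate1d

x = np.linspace(0, 2 * np.pi, 201)
dx = x[1] - x[0]
y = np.sin(x)

w = np.array([-0.5, 0.0, 0.5])                 # 3-point central-difference weights
dy = correlate1d(y, w, mode='reflect') / dx
interior = slice(1, -1)                         # the end points are distorted by the boundary mode
print(np.max(np.abs(dy[interior] - np.cos(x[interior]))))   # small, O(dx**2)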
Example 20
    def plot_pos(self, impact_parameter, radius, phi):
        """
        Perform intersection over all angles and return length

        Parameters
        ----------
        impact_parameter: float
            Impact distance from mirror centre
        ang: ndarray
            Angles over which to integrate
        phi: float
            Rotation angle of muon image
        
        Returns
        -------
        ndarray
            Chord length for each angle
        """

        bins = int((2 * np.pi * radius) / self.pixel_width.value) * self.oversample_bins
        #ang = np.linspace(-np.pi * u.rad + phi, np.pi * u.rad + phi, bins)
        ang = np.linspace(-np.pi + phi, np.pi + phi, bins)
        l = self.intersect_circle(impact_parameter, ang)
        l = correlate1d(l, np.ones(self.oversample_bins), mode='wrap', axis=0)
        l /= self.oversample_bins

        return ang, l
Example 21
    def plot_pos(self, impact_parameter, radius, phi):
        """
        Perform intersection over all angles and return length

        Parameters
        ----------
        impact_parameter: float
            Impact distance from mirror centre
        ang: ndarray
            Angles over which to integrate
        phi: float
            Rotation angle of muon image

        Returns
        -------
        ndarray
            Chord length for each angle
        """

        bins = int((2 * np.pi * radius) /
                   self.pixel_width.value) * self.oversample_bins
        # ang = np.linspace(-np.pi * u.rad + phi, np.pi * u.rad + phi, bins)
        ang = np.linspace(-np.pi + phi, np.pi + phi, bins)
        l = self.intersect_circle(impact_parameter, ang)
        l = correlate1d(l, np.ones(self.oversample_bins), mode='wrap', axis=0)
        l /= self.oversample_bins

        return ang, l
Example 22
def create_profile(mirror_radius, hole_radius, impact_parameter, radius, phi, pixel_diameter, oversampling=3):
    """
    Perform intersection over all angles and return length

    Parameters
    ----------
    impact_parameter: float
        Impact distance from mirror center
    ang: ndarray
        Angles over which to integrate
    phi: float
        Rotation angle of muon image

    Returns
    -------
    ndarray
        Chord length for each angle
    """
    circumference = 2 * np.pi * radius
    pixels_on_circle = int(circumference / pixel_diameter)

    ang = phi + linspace_two_pi(pixels_on_circle * oversampling)

    length = intersect_circle(mirror_radius, impact_parameter, ang, hole_radius)
    length = correlate1d(length, np.ones(oversampling), mode='wrap', axis=0)
    length /= oversampling

    return ang, length
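The mode='wrap' correlation above is just a circular (periodic) boxcar average over the oversampled angle bins. A minimal standalone sketch, with an arbitrary stand-in for the chord-length profile:

import numpy as np
from scipy.ndimage import correlate1d

oversample = 3
ang = np.linspace(-np.pi, np.pi, 120, endpoint=False)
chord = np.maximum(0.0, np.cos(ang))     # stand-in for the chord-length profile

# periodic boxcar average: the wrap mode joins the two ends of the angular axis
smooth = correlate1d(chord, np.ones(oversample), mode='wrap') / oversample
print(chord.shape, smooth.shape)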
Example 23
def Dn(x, y, Np, ndiv=1, axis=-1, mode='strip', cval=0.):
    """ central numerical derivative using Np points of order ndiv
        (Np>1 and odd), using convolution
        Data needs to be equally spaced in x
        can use mode= 'nearest', 'wrap', 'reflect', 'constant'
                      'strip'
                      'strip' will just cut the bad data at the ends
        cval is for 'constant'
        returns x', d^n y/dx^n(x')

        Note the algorithm is not intended to remove noise,
        but to provide a more accurate derivative of a function.
        The larger Np, the more derivatives are available.
        It basically finds the best Taylor series parameters,
        assuming Np points around the center are available:
          assuming f_k = f(xo + k dx),  k=-n .. n, Np=2*n+1
          and with f(x) = f(xo) + f' (x-xo) + f''(x-xo)^2/2 + ....
                        = f(xo) + f' k dx + ((f'' dx**2)/2) k**2 + ...
          we want to solve for (1, f', f'' dx**2/2, ...)
          and we pick the answer for the correct derivative.
    """
    dx = x[1]-x[0]
    kernel = central_diff_weights(Np,ndiv=ndiv)
    strip = False
    if mode=='strip':
        strip = True
        mode = 'reflect'
    dy = filters.correlate1d(y, kernel, axis=axis, mode=mode, cval=cval)
    D = dy/dx**ndiv
    if strip:
        x, D = _do_strip(x, D, Np, axis=axis)
    return x, D
Example 24
    def perform_spectral_analysis(self):
        # FIX: Consolidate with map analysis
        # Performs same analysis functions as the map, but just on the single (1D) total spectrum

        # Smoothing (presently performed for individual detectors)
        sigma_spec = self.settings[
            'spectral_smooth_gauss_sigma']  # In terms of frames?
        width_spec = self.settings[
            'spectral_smooth_savgol_width']  # In terms of frames?
        order_spec = self.settings['spectral_smooth_savgol_order']

        if self.settings['spectral_smooth_type'] == 'Gaussian':
            print('spectral smoothing...')
            self.total_spec = gaussian_filter(self.total_spec, sigma_spec)
        elif self.settings['spectral_smooth_type'] == 'Savitzky-Golay':
            # Currently always uses 4th order polynomial to fit
            print('spectral smoothing...')
            self.total_spec = savgol_filter(self.total_spec,
                                            1 + 2 * width_spec, order_spec)

        # Background subtraction (implemented detector-wise currently)
        # NOTE: INSUFFICIENT SPATIAL SMOOTHING MAY GIVE INACCURATE OR EVEN INF RESULTS
        if not (self.settings['subtract_ke1'] == 'None'):
            print('Performing background subtraction...')
            # Fit a power law to the background
            # get background range
            ke_min = self.settings['ke1_start']
            ke_max = self.settings['ke1_stop']
            fit_map = (self.ke_interp > ke_min) * (self.ke_interp < ke_max)
            ke_to_fit = self.ke_interp[fit_map]
            spec_to_fit = self.total_spec[fit_map]

            if self.settings['subtract_ke1'] == 'Power Law':
                # Fit power law
                A, m = self.fit_powerlaw(ke_to_fit, spec_to_fit)
                bg = A * self.ke_interp**m
            elif self.settings['subtract_ke1'] == 'Linear':
                # Fit line (there may be an easier way for 1D case)
                m, b = self.fit_line(ke_to_fit, spec_to_fit)
                bg = m * self.ke_interp + b

            self.total_spec -= bg

        if self.settings['subtract_tougaard']:
            R_loss = self.settings['R_loss']
            E_loss = self.settings['E_loss']
            dE = self.ke_interp[1] - self.ke_interp[0]
            # Always use a kernel out to 3 * E_loss to ensure enough feature size
            ke_kernel = np.arange(0, 3 * E_loss, abs(dE))
            if not np.mod(len(ke_kernel), 2) == 0:
                ke_kernel = np.arange(0, 3 * E_loss + dE, abs(dE))
            self.K_toug = (8.0 / np.pi**2) * R_loss * E_loss**2 * ke_kernel / (
                (2.0 * E_loss / np.pi)**2 + ke_kernel**2)**2
            # Normalize the kernel so that its area is equal to R_loss
            self.K_toug /= (np.sum(self.K_toug) * dE) / R_loss
            self.total_spec -= dE * correlate1d(self.total_spec,
                                                self.K_toug,
                                                mode='nearest',
                                                origin=-len(ke_kernel) // 2,
                                                axis=0)
Example 25
def exponential_filter1d(input, sigma, axis=-1, output=None,
                      mode="reflect", cval=0.0, truncate=10.0):
    """One-dimensional Exponential filter.
    Parameters
    ----------
    %(input)s
    sigma : scalar
        Tau of exponential kernel
    %(axis)s
    %(output)s
    %(mode)s
    %(cval)s
    truncate : float
        Truncate the filter at this many time constants (tau).
        Default is 10.0.
    Returns
    -------
    exponential_filter1d : ndarray
    """
    sd = float(sigma)
    # make the radius of the filter equal to truncate standard deviations
    lw = int(truncate * sd + 0.5)
    weights = [0.0] * (2 * lw + 1)
    weights[lw] = 1.0
    sum = 1.0
    # calculate the kernel:
    for ii in range(1, lw + 1):
        tmp = math.exp(-float(ii) / sd)
        weights[lw + ii] = 0.0  # one-sided (causal) kernel: no weight on the forward side
        weights[lw - ii] = tmp
        sum += tmp
    for ii in range(2 * lw + 1):
        weights[ii] /= sum
    return correlate1d(input, weights, axis, output, mode, cval, 0)
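A standalone sketch of the same idea: build a normalized one-sided exponential kernel and apply it with correlate1d. The excerpt zeroes the weights on the lw + ii side, which makes the filter causal along the chosen axis; the kernel builder below reproduces that behaviour (truncate=5 here just keeps the kernel shorter than the test signal).

import numpy as np
from scipy.ndimage import correlate1d

def exp_kernel(tau, truncate=5.0):
    """One-sided (causal) exponential kernel, normalized to unit sum."""
    lw = int(truncate * tau + 0.5)
    w = np.zeros(2 * lw + 1)
    w[:lw + 1] = np.exp(-np.arange(lw, -1, -1) / tau)   # weight only the trailing side
    return w / w.sum()

signal = np.zeros(50)
signal[10] = 1.0
smoothed = correlate1d(signal, exp_kernel(3.0), mode='reflect')
print(smoothed.argmax())   # 10: the response starts at the impulse and decays after it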
Example 26
 def thread(_, start, stop):
     weight_x, weight_x_origin = SIFT.__weight_x, SIFT.__weight_x_origin
     tmp = empty_like(im)  # INTERMEDIATE: im.shape * nthreads
     for a in range(start, stop):
         #pylint: disable=unsubscriptable-object
         correlate1d(im_orientation[a, :, :],
                     weight_x,
                     0,
                     mode='constant',
                     origin=weight_x_origin,
                     output=tmp)
         correlate1d(tmp,
                     weight_x,
                     1,
                     mode='constant',
                     origin=weight_x_origin,
                     output=im_orientation[a, :, :])
Example 27
def scharr(input, axis=-1, output=None, mode="reflect", cval=0.0):
    """Calculate a Scharr filter.

    Parameters
    ----------
    %(input)s
    %(axis)s
    %(output)s
    %(mode)s
    %(cval)s

    """
    input = np.asarray(input)
    axis = _ni_support._check_axis(axis, input.ndim)
    output, return_value = _ni_support._get_output(output, input)
    correlate1d(input, [-1, 0, 1], axis, output, mode, cval, 0)
    axes = [ii for ii in range(input.ndim) if ii != axis]
    for ii in axes:
        correlate1d(output, [3, 10, 3], ii, output, mode, cval, 0)
    return return_value
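The excerpt relies on scipy's private _ni_support helpers; the same separable Scharr gradient can be sketched with the public API, using the [-1, 0, 1] derivative along one axis and the [3, 10, 3] smoothing across the other:

import numpy as np
from scipy.ndimage import correlate1d

def scharr_2d(img, axis):
    """Sketch of a 2-D Scharr derivative along `axis` using two 1-D correlations."""
    img = np.asarray(img, dtype=float)
    out = correlate1d(img, [-1, 0, 1], axis=axis, mode='reflect')
    return correlate1d(out, [3, 10, 3], axis=1 - axis, mode='reflect')

img = np.zeros((32, 32))
img[:, 16:] = 1.0                # a vertical step edge
gx = scharr_2d(img, axis=1)
gy = scharr_2d(img, axis=0)
magnitude = np.hypot(gx, gy)
print(magnitude.max())           # strongest response sits on the edge columns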
Example 28
def deriv(env, model, obj_index, src_index, m, axis, R):
    w = central_diff_weights(5)
    #d = correlate1d(m, w, axis=axis, mode='constant')
    d = (correlate1d(m, -w, axis=0, mode='constant')) \
      + (correlate1d(m,  w, axis=1, mode='constant'))
    d = (correlate1d(d, -w, axis=0, mode='constant')) \
      + (correlate1d(d,  w, axis=1, mode='constant'))
    d = d[2:-2,2:-2]
    d[d>.8] = .8
    d[d<-.8] = -.8
    #d = correlate1d(d, w, axis=axis, mode='constant')
    #d = diff(d, axis=axis)
    #d /= abs(d)
    #d = correlate1d(d, w, axis=axis, mode='constant')
    #d = diff(d, axis=axis)

    R -= model[0].basis.top_level_cell_size * 2
    #R -= model[0].basis.top_level_cell_size * 2
    pl.matshow(d, extent=[-R,R,-R,R])
    glspl.colorbar()
    arrival_plot(model, obj_index, src_index, only_contours=True, clevels=200)
Example 29
 def __call__(self, im, out=None, region=None, nthreads=1):
     from scipy.ndimage.filters import correlate1d
     from ._base import get_image_region, replace_sym_padding, round_u8_steps
     if self.__compat:
         # In compatibility mode, instead of symmetric reflections we pad with 0s
         im, region = replace_sym_padding(im, 7, region, 15, nthreads)
     else:
         im, region = get_image_region(im, 15, region, nthreads=nthreads)
     gauss_filter = SIFT.__gauss_filter
     im = correlate1d(im, gauss_filter, 0,
                      mode='nearest')  # INTERMEDIATE: im.shape
     im = correlate1d(im, gauss_filter, 1,
                      mode='nearest')  # TODO: TEMPORARY: im.shape
     if self.__compat: round_u8_steps(im)
     im = SIFT.__dense_sift(im, None, nthreads)
     region = (region[0] - 7, region[1] - 7, region[2] - 7, region[3] - 7)
     im = im[:, region[0]:region[2], region[1]:region[3]]
     if out is not None:
         from numpy import copyto
         copyto(out, im)
     return im
Example 30
def deriv(env, model, obj_index, src_index, m, axis, R):
    w = central_diff_weights(5)
    #d = correlate1d(m, w, axis=axis, mode='constant')
    d = (correlate1d(m, -w, axis=0, mode='constant')) \
      + (correlate1d(m,  w, axis=1, mode='constant'))
    d = (correlate1d(d, -w, axis=0, mode='constant')) \
      + (correlate1d(d,  w, axis=1, mode='constant'))
    d = d[2:-2, 2:-2]
    d[d > .8] = .8
    d[d < -.8] = -.8
    #d = correlate1d(d, w, axis=axis, mode='constant')
    #d = diff(d, axis=axis)
    #d /= abs(d)
    #d = correlate1d(d, w, axis=axis, mode='constant')
    #d = diff(d, axis=axis)

    R -= model[0].basis.top_level_cell_size * 2
    #R -= model[0].basis.top_level_cell_size * 2
    pl.matshow(d, extent=[-R, R, -R, R])
    glspl.colorbar()
    arrival_plot(model, obj_index, src_index, only_contours=True, clevels=200)
Example 31
    def getPointSourcePatch(self, px, py, minval=0., **kwargs):
        from scipy.ndimage.filters import correlate1d
        H,W = self.img.shape
        ix = int(np.round(px))
        iy = int(np.round(py))
        dx = px - ix
        dy = py - iy
        x0 = ix - W // 2
        y0 = iy - H // 2
        L = self.Lorder
        Lx = lanczos_filter(L, np.arange(-L, L+1) + dx)
        Ly = lanczos_filter(L, np.arange(-L, L+1) + dy)
        sx      = correlate1d(self.img, Lx, axis=1, mode='constant')
        shifted = correlate1d(sx,       Ly, axis=0, mode='constant')
        #shifted /= (Lx.sum() * Ly.sum())
        #print 'Shifted PSF: range', shifted.min(), shifted.max()

        ### ???
        #shifted = np.maximum(shifted, 0.)

        shifted /= shifted.sum()
        return Patch(x0, y0, shifted)
Example 32
def lowpass(image, lshort, threshold=None):
    """Convolve with a Gaussian to remove short-wavelength noise.

    Parameters
    ----------
    image : ndarray
    lshort : small-scale cutoff (noise)
        give a tuple value for different sizes per dimension
        give int value for same value for all dimensions
    threshold : float or integer
        By default, 1 for integer images and 1/256. for float images.

    Returns
    -------
    result : array
        the filtered image

    See Also
    --------
    bandpass
    """
    lshort = validate_tuple(lshort, image.ndim)
    if threshold is None:
        if np.issubdtype(image.dtype, np.integer):
            threshold = 1
        else:
            threshold = 1/256.
    result = np.array(image, dtype=float)
    for (axis, size) in enumerate(lshort):
        if size > 0:
            correlate1d(result, gaussian_kernel(size, 4), axis,
                        output=result, mode='constant', cval=0.0)
    try:
        frame_no = image.frame_no
    except AttributeError:
        frame_no = None
    return Frame(np.where(result > threshold, result, 0), frame_no)
Example 33
def exponential_filter1d(input,
                         sigma,
                         axis=-1,
                         output=None,
                         mode="reflect",
                         cval=0.0,
                         truncate=10.0,
                         power=1):
    """
    One-dimensional Exponential filter.

    Parameters
    ----------
    input
    sigma : scalar
        Tau of exponential kernel
    axis : int, optional
        The axis of input along which to calculate. 
        Default is -1.

    output : array or dtype, optional
        The array in which to place the output, or the dtype of the returned array. 
        By default an array of the same dtype as input will be created.
        
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        The mode parameter determines how the input array is extended beyond
        its boundaries. Default is 'reflect'.
    cval : scalar, optional
        Value to fill past edges of input if mode is 'constant'. Default is 0.0.
    truncate : float
        Truncate the filter at this many time constants (tau).
        Default is 10.0.
    power : float, optional
        Exponent applied to (ii / sigma) when building the kernel; 1 gives a
        plain exponential decay. Default is 1.

    """
    sd = float(sigma)
    # make the radius of the filter equal to truncate standard deviations
    lw = int(truncate * sd + 0.5)
    weights = [0.0] * (2 * lw + 1)
    weights[lw] = 1.0
    sum = 1.0
    # calculate the kernel:
    for ii in range(1, lw + 1):
        tmp = math.exp(-(float(ii) / sd)**power)
        weights[lw + ii] = 0.0  # one-sided (causal) kernel: no weight on the forward side
        weights[lw - ii] = tmp
        sum += tmp
    for ii in range(2 * lw + 1):
        weights[ii] /= sum
    return correlate1d(input, weights, axis, output, mode, cval, 0)
Example 34
def gaussian_filter(img, sigma, order, output):

    inp = np.asarray(img)
    output = np.zeros(inp.shape)
    orders = order

    for i in range(2):
        sd = float(sigma)
        if sd > 1e-15:
            lw = int(4.0 * sd + 0.5)
            weights = gaussian_kernel(sigma, orders[i], lw)[::-1]
            output = correlate1d(inp, weights, i, output)
            inp = output

    return output
Example 35
File: dave.py Project: temik42/dave
 def convol(self, X, mul = (0,0,0)):
     out = np.array(X).astype(np.float32)
     for i in range(0,3):
         if (mul[i] == 0):
             correlate1d(out, self.exp[i], axis=i, output = out)
         if (mul[i] == 1):
             correlate1d(out, self.exp[i]*self.x[i], axis=i, output = out)
         if (mul[i] == 2):
             correlate1d(out, self.exp[i]*self.x[i]**2, axis=i, output = out)    
     return out
Example 36
def _z2rEnhanced_mdcorr(z, xmode='reflect', ymode='wrap'):
    """multidimensional version
    assuming the two last dimensions represent a 2-D image
    Uses scipy.ndimage.filters.correlate to reduce the number of for-loops
    even more.
    """
    # get the shape of the input
    dimy = z.shape[-2]
    dimx = z.shape[-1]

    # calculate the decibel values from the input
    db = decibel(z)
    # calculate the shower differences by 1-d correlation with a differencing
    # kernel
    db_diffx = np.abs(filters.correlate1d(db, [1, -1], axis=-1, mode=xmode, origin=-1))
    db_diffy = np.abs(filters.correlate1d(db, [1, -1], axis=-2, mode=ymode, origin=-1))

    diffxmode = 'wrap' if xmode == 'wrap' else 'constant'
    diffymode = 'wrap' if ymode == 'wrap' else 'constant'
    diffx_sum1 = filters.correlate1d(db_diffx, [1, 1, 1], axis=-2, mode=diffymode)
    diffxsum = filters.correlate1d(diffx_sum1, [1, 1, 0], axis=-1, mode=diffxmode)
    diffy_sum1 = filters.correlate1d(db_diffy, [1, 1, 1], axis=-1, mode=diffxmode)
    diffysum = filters.correlate1d(diffy_sum1, [1, 1, 0], axis=-2, mode=diffymode)

    divider = np.ones(db.shape) * 12.
    if xmode != 'wrap':
        divider[..., [0, -1]] = np.rint((divider[..., [0, -1]] + 1) / 1.618) - 1
    if ymode != 'wrap':
        divider[..., [0, -1], :] = np.rint((divider[..., [0, -1], :] + 1) / 1.618) - 1

    # the shower index is the sum of the x- and y-differences
    si = (diffxsum + diffysum) / divider

    # set up our rainfall output array
    r = np.zeros(z.shape)

    gt44 = db > 44.
    r[gt44] = z2r(z[gt44], a=77, b=1.9)
    si[gt44] = -1.
    # the same is true for values between 36.5 and 44 dBZ
    bt3644 = (db >= 36.5) & (db <= 44.)
    r[bt3644] = z2r(z[bt3644], a=200, b=1.6)
    si[bt3644] = -2.

    si1 = (si >= 0.)
    si2 = si1 & (si < 3.5)
    si3 = si1 & ~si2 & (si <= 7.5)
    si4 = si > 7.5

    r[si2] = z2r(z[si2], a=125, b=1.4)
    r[si3] = z2r(z[si3], a=200, b=1.6)
    r[si4] = z2r(z[si4], a=320, b=1.4)

    return r, si
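The differencing correlations above use a two-tap kernel with origin=-1 so that each output pixel is paired with its immediate neighbour along the chosen axis. A tiny sketch of what those calls compute:

import numpy as np
from scipy.ndimage import correlate1d

a = np.array([[0., 1., 3.],
              [2., 2., 2.]])

# one-sided difference between neighbouring pixels along the last axis,
# as in db_diffx above (the excerpt then takes the absolute value too)
diffx = np.abs(correlate1d(a, [1, -1], axis=-1, mode='reflect', origin=-1))
print(diffx)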
Example 37
    def plot_pos(self,impact_dist,radius,phi):
        """
        Perform intersection over all angles and return length
        :param impact_dist: float
            Impact distance from mirror centre
        :param ang: ndarray
            Angles over which to integrate
        :param phi: float
            Rotation angle of muon image
        :return: ndarray
            Chord length for each angle
        """

        bins = int((2 * math.pi * radius)/self.pixel_width) * self.oversample_bins
        ang = np.linspace(-1*math.pi*u.rad+phi, 1*math.pi*u.rad+phi,bins*1)
        l = self.intersect_circle(impact_dist,ang)
        l = correlate1d(l,np.ones(self.oversample_bins),mode="wrap",axis=0)
        l /= self.oversample_bins

        return ang,l
Example 38
    def plot_pos(self, impact_dist, radius, phi):
        """
        Perform intersection over all angles and return length
        :param impact_dist: float
            Impact distance from mirror centre
        :param ang: ndarray
            Angles over which to integrate
        :param phi: float
            Rotation angle of muon image
        :return: ndarray
            Chord length for each angle
        """

        bins = int(
            (2 * math.pi * radius) / self.pixel_width) * self.oversample_bins
        ang = np.linspace(-1 * math.pi * u.rad + phi,
                          1 * math.pi * u.rad + phi, bins * 1)
        l = self.intersect_circle(impact_dist, ang)
        l = correlate1d(l, np.ones(self.oversample_bins), mode="wrap", axis=0)
        l /= self.oversample_bins

        return ang, l
Example 39
def grad_tau(env, model, obj_index, which, src_index):

    assert which in ['x', 'y'], "grad_tau: 'which' must be one of 'x' or 'y'"

    #print "grad_tau"
    obj, ps = model['obj,data'][obj_index]
    R = obj.basis.mapextent

    #---------------------------------------------------------------------------
    # Find the derivative of the arrival time surface.
    #---------------------------------------------------------------------------
    arrival = obj.basis.arrival_grid(ps)[src_index]

    w = central_diff_weights(3)

    which = 1 if which == 'x' else 0
    d = correlate1d(arrival, w, axis=which, mode='constant')

    d = d[1:-1, 1:-1]
    d[np.abs(d) < 1e-3] = 0
    d[d > 0] = 1
    d[d < 0] = -1
    pl.matshow(d, fignum=False, extent=[-R, R, -R, R], alpha=0.5)
Example 40
def grad_tau(env, model, obj_index, which, src_index):

    assert which in ['x','y'], "grad_tau: 'which' must be one of 'x' or 'y'"

    #print "grad_tau"
    obj,ps = model['obj,data'][obj_index]
    R = obj.basis.mapextent

    #---------------------------------------------------------------------------
    # Find the derivative of the arrival time surface.
    #---------------------------------------------------------------------------
    arrival = obj.basis.arrival_grid(ps)[src_index]

    w = central_diff_weights(3)

    which = 1 if which == 'x' else 0
    d = correlate1d(arrival, w, axis=which, mode='constant')

    d = d[1:-1,1:-1]
    d[np.abs(d) < 1e-3] = 0
    d[d>0] = 1
    d[d<0] = -1
    pl.matshow(d, fignum=False, extent=[-R,R,-R,R], alpha=0.5)
Example 41
 def _flatness(self, data: np.ndarray) -> np.ndarray:
     window = self.window
     if self._kernel is None or self._kernel[0] != (window, self.truncate,
                                                    self.shape):
         if self.shape == 'square':
             kern = np.ones((window * 2, ), dtype='f4')
             kern[-window:] = -1.
             kern /= self.window
         elif self.shape == 'gaussian':
             kern = np.linspace(-self.truncate,
                                self.truncate,
                                window * self.truncate * 2 + 1,
                                dtype='f4')
             kern *= -np.exp(-kern**2 / 2.)
             kern /= abs(kern[:len(kern) // 2 + 1].sum())
         else:
             raise KeyError(self.shape +
                            ' is unknown in DerivateSplitDetector')
         self._kernel = (window, self.truncate, self.shape), kern
     else:
         kern = self._kernel[1]
     delta = correlate1d(data, kern, mode='nearest')
     return np.abs(delta)
Example 42
def _z_to_r_enhanced_mdcorr(z, xmode='reflect', ymode='wrap'):
    """multidimensional version

    assuming the two last dimensions represent a 2-D image
    Uses :func:`scipy:scipy.ndimage.filters.correlate` to reduce the number of
    for-loops even more.
    """
    # get the shape of the input
    # dimy = z.shape[-2]
    # dimx = z.shape[-1]

    # calculate the decibel values from the input
    db = decibel(z)
    # calculate the shower differences by 1-d correlation with a differencing
    # kernel
    db_diffx = np.abs(
        filters.correlate1d(db, [1, -1], axis=-1, mode=xmode, origin=-1))
    db_diffy = np.abs(
        filters.correlate1d(db, [1, -1], axis=-2, mode=ymode, origin=-1))
    diffxmode = 'wrap' if xmode == 'wrap' else 'constant'
    diffymode = 'wrap' if ymode == 'wrap' else 'constant'
    diffx_sum1 = filters.correlate1d(db_diffx, [1, 1, 1],
                                     axis=-2,
                                     mode=diffymode)
    diffxsum = filters.correlate1d(diffx_sum1, [1, 1, 0],
                                   axis=-1,
                                   mode=diffxmode)
    diffy_sum1 = filters.correlate1d(db_diffy, [1, 1, 1],
                                     axis=-1,
                                     mode=diffxmode)
    diffysum = filters.correlate1d(diffy_sum1, [1, 1, 0],
                                   axis=-2,
                                   mode=diffymode)

    divider = np.ones(db.shape) * 12.
    if xmode != 'wrap':
        divider[..., [0, -1]] = np.rint(
            (divider[..., [0, -1]] + 1) / 1.618) - 1
    if ymode != 'wrap':
        divider[..., [0, -1], :] = np.rint(
            (divider[..., [0, -1], :] + 1) / 1.618) - 1

    # the shower index is the sum of the x- and y-differences
    si = (diffxsum + diffysum) / divider

    # set up our rainfall output array
    r = np.zeros(z.shape)

    gt44 = db > 44.
    r[gt44] = z_to_r(z[gt44], a=77, b=1.9)
    si[gt44] = -1.
    # the same is true for values between 36.5 and 44 dBZ
    bt3644 = (db >= 36.5) & (db <= 44.)
    r[bt3644] = z_to_r(z[bt3644], a=200, b=1.6)
    si[bt3644] = -2.

    si1 = (si >= 0.)
    si2 = si1 & (si < 3.5)
    si3 = si1 & ~si2 & (si <= 7.5)
    si4 = si > 7.5

    r[si2] = z_to_r(z[si2], a=125, b=1.4)
    r[si3] = z_to_r(z[si3], a=200, b=1.6)
    r[si4] = z_to_r(z[si4], a=320, b=1.4)

    return r, si
Example 43
def sinc_filter1d(
    input, sigma, axis=-1, order=0, output=None, mode="reflect", cval=0.0, truncate=6.0
):
    lw = int(truncate * sigma + 0.5)
    weights = _sinc_kernel1d(sigma, lw)[::-1]
    return correlate1d(input, weights, axis, output, mode, cval, 0)
Example 44
    def perform_map_analysis(self):
        # Smoothing (presently performed for individual detectors)
        # FIX: MAY NEED A SUM MAP THAT ALIGNS DETECTOR CHANNELS AND SUMS
        sigma_spec = self.settings[
            'spectral_smooth_gauss_sigma']  # In terms of frames?
        width_spec = self.settings[
            'spectral_smooth_savgol_width']  # In terms of frames?
        order_spec = self.settings['spectral_smooth_savgol_order']

        if self.settings['spectral_smooth_type'] == 'Gaussian':
            print('spectral smoothing...')
            self.current_auger_map = gaussian_filter(self.current_auger_map,
                                                     (sigma_spec, 0, 0, 0, 0))
        elif self.settings['spectral_smooth_type'] == 'Savitzky-Golay':
            # Currently always uses 4th order polynomial to fit
            print('spectral smoothing...')
            self.current_auger_map = savgol_filter(self.current_auger_map,
                                                   1 + 2 * width_spec,
                                                   order_spec,
                                                   axis=0)

        # Background subtraction (implemented detector-wise currently)
        # NOTE: INSUFFICIENT SPATIAL SMOOTHING MAY GIVE INACCURATE OR EVEN INF RESULTS
        if not (self.settings['subtract_ke1'] == 'None'):
            print('Performing background subtraction...')
            for iDet in range(self.current_auger_map.shape[-1]):
                # Fit a power law to the background
                # get background range
                ke_min = self.settings['ke1_start']
                ke_max = self.settings['ke1_stop']
                fit_map = (self.ke[iDet] > ke_min) * (self.ke[iDet] < ke_max)
                ke_to_fit = self.ke[iDet, fit_map]
                spec_to_fit = self.current_auger_map[fit_map, 0, :, :,
                                                     iDet].transpose(1, 2, 0)

                if self.settings['subtract_ke1'] == 'Power Law':
                    # Fit power law
                    A, m = self.fit_powerlaw(ke_to_fit, spec_to_fit)
                    ke_mat = np.tile(self.ke[iDet],
                                     (spec_to_fit.shape[0],
                                      spec_to_fit.shape[1], 1)).transpose(
                                          2, 0, 1)
                    A = np.tile(A, (self.ke.shape[1], 1, 1))
                    m = np.tile(m, (self.ke.shape[1], 1, 1))
                    bg = A * ke_mat**m
                elif self.settings['subtract_ke1'] == 'Linear':
                    # Fit line
                    m, b = self.fit_line(ke_to_fit, spec_to_fit)
                    ke_mat = np.tile(self.ke[iDet],
                                     (spec_to_fit.shape[0],
                                      spec_to_fit.shape[1], 1)).transpose(
                                          2, 0, 1)
                    m = np.tile(m, (self.ke.shape[1], 1, 1))
                    b = np.tile(b, (self.ke.shape[1], 1, 1))
                    bg = m * ke_mat + b

                self.current_auger_map[:, 0, :, :, iDet] -= bg

        if self.settings['subtract_tougaard']:
            R_loss = self.settings['R_loss']
            E_loss = self.settings['E_loss']
            dE = self.ke[0, 1] - self.ke[0, 0]
            # Always use a kernel out to 3 * E_loss to ensure enough feature size
            ke_kernel = np.arange(0, 3 * E_loss, abs(dE))
            if not np.mod(len(ke_kernel), 2) == 0:
                ke_kernel = np.arange(0, 3 * E_loss + dE, abs(dE))
            self.K_toug = (8.0 / np.pi**2) * R_loss * E_loss**2 * ke_kernel / (
                (2.0 * E_loss / np.pi)**2 + ke_kernel**2)**2
            # Normalize the kernel so that its area is equal to R_loss
            self.K_toug /= (np.sum(self.K_toug) * dE) / R_loss
            self.current_auger_map -= dE * correlate1d(
                self.current_auger_map,
                self.K_toug,
                mode='nearest',
                origin=-len(ke_kernel) // 2,
                axis=0)
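
The Tougaard step above builds a Universal-type loss kernel K(T) = (8/pi^2) * R_loss * E_loss^2 * T / ((2*E_loss/pi)^2 + T^2)^2, rescales it so its integral equals R_loss, and removes the inelastic background by a one-sided correlation along the energy axis. Below is a minimal standalone sketch of the same construction; the parameter values, energy axis, and test spectrum are illustrative and not taken from the class above.

import numpy as np
from scipy.ndimage import correlate1d  # older code imports this from scipy.ndimage.filters

# Illustrative parameters, not the values used by the class above
R_loss, E_loss = 0.5, 30.0           # loss strength and characteristic loss energy
ke = np.arange(200.0, 400.0, 0.25)   # toy kinetic-energy axis
dE = ke[1] - ke[0]

# Sample the loss kernel out to 3 * E_loss, keeping the length even so that
# origin=-len(ke_kernel)//2 stays within correlate1d's allowed range
ke_kernel = np.arange(0.0, 3.0 * E_loss, dE)
if len(ke_kernel) % 2:
    ke_kernel = np.arange(0.0, 3.0 * E_loss + dE, dE)
K_toug = (8.0 / np.pi**2) * R_loss * E_loss**2 * ke_kernel / (
    (2.0 * E_loss / np.pi)**2 + ke_kernel**2)**2
K_toug /= (np.sum(K_toug) * dE) / R_loss   # kernel area equals R_loss

# One-sided correlation along the energy axis, as in the method above
spectrum = np.exp(-0.5 * ((ke - 300.0) / 2.0)**2)   # made-up peak
background = dE * correlate1d(spectrum, K_toug, mode='nearest',
                              origin=-len(ke_kernel) // 2)
corrected = spectrum - background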
Esempio n. 45
0
def momentConvolve2d(data, k, sigma, middleOnly=False):
    """Convolve an image with coefficient kernels to compute local 'moments'

    @param data       The input image
    @param k          A vector of indices (e.g. -3,-2,-1,0,1,2,3 )
    @param sigma      Gaussian sigma for an overall smoothing to avoid blowing up higher-order moments
    @param middleOnly Boolean to return the central pixel only (used for calibration images)

    return ImageMoment  A container with attributes for each moment: .i0 .ix .iy .ixx .iyy .ixy etc.

    Each of these convolutions uses a separable kernel, and many share a common convolution
    in at least one dimension.
    """

    # moments are  e.g.   sum(I*x) / sum(I)

    gauss = np.exp(-k**2 / (2.0 * sigma**2))

    kk = k * k
    k3 = kk * k
    k4 = kk * kk

    mode = 'reflect'

    # start with convolutions with our Gaussian separately in X and Y
    gaussX = filt.correlate1d(data, gauss, mode=mode)
    gaussY = filt.correlate1d(data, gauss, mode=mode, axis=0)

    # zeroth order moment (i.e. a sum), convolve the X gaussian along Y
    sumI = filt.correlate1d(gaussX, gauss, mode=mode, axis=0)
    sumI[np.where(sumI == 0)] = 1.0e-7

    # normalize up front
    gaussX /= sumI
    gaussY /= sumI

    # Now use gaussX and gaussY to get the moments
    ix = filt.correlate1d(gaussY, gauss * k, mode=mode)
    iy = filt.correlate1d(gaussX, gauss * k, mode=mode, axis=0)
    ixx = filt.correlate1d(gaussY, gauss * kk, mode=mode)
    iyy = filt.correlate1d(gaussX, gauss * kk, mode=mode, axis=0)

    # cross term requires special attention.  Start from scratch.
    ixy0 = filt.correlate1d(data, gauss * k, mode=mode)
    ixy = filt.correlate1d(ixy0, gauss * k, mode=mode, axis=0) / sumI

    # don't bother with 3rd order cross terms
    ix3 = filt.correlate1d(gaussY, gauss * k3, mode=mode)
    iy3 = filt.correlate1d(gaussX, gauss * k3, mode=mode, axis=0)

    values = sumI, ix, iy, ixx, iyy, ixy, ix3, iy3
    if middleOnly:
        ny, nx = data.shape
        values = [x[ny // 2, nx // 2] for x in values]
    return ImageMoment(*values)
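
A brief usage sketch for momentConvolve2d above, assuming the function is in scope. The ImageMoment container and the filt module alias are not shown in this excerpt, so a namedtuple with the same eight fields and scipy.ndimage are assumed here purely for illustration; the test image is made up.

import numpy as np
from collections import namedtuple
import scipy.ndimage as filt  # the excerpt refers to this module as `filt`

# Stand-in for the ImageMoment container used above (assumed field order)
ImageMoment = namedtuple('ImageMoment', 'i0 ix iy ixx iyy ixy ix3 iy3')

# A small off-centre Gaussian spot as test data
y, x = np.mgrid[0:33, 0:33]
data = np.exp(-0.5 * ((x - 17.3)**2 + (y - 15.8)**2) / 2.0**2)

k = np.arange(-3, 4)                        # index vector, as in the docstring
m = momentConvolve2d(data, k, sigma=2.0, middleOnly=True)
print(m.ix, m.iy)                           # local first moments at the centre pixel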
Esempio n. 46
0
    def __dense_sift(im, out, nthreads):
        # CHANGED: forced grid_spacing = 1 and patch_size is now a constant defined globally
        # CHANGED: the input im can be modified slightly in place (scaling)
        # CHANGED: no longer returns optional grid
        # CHANGED: a few pieces have been removed and made into Cython functions for speed and multi-threading
        from numpy import empty, empty_like, sqrt, arctan2, cos, sin
        from scipy.ndimage.filters import correlate1d
        from ._base import run_threads
        from ._sift import orientations, neighborhoods, normalize  #pylint: disable=no-name-in-module

        # TODO: don't precompute division?
        # TODO: this sometimes causes a divide-by-zero
        im *= 1 / im.max()  # can modify im here since it is always the padded image

        H, W = im.shape

        dgauss_filter = SIFT.__dgauss_filter
        tmp = empty_like(im)  # INTERMEDIATES: im.shape

        # vertical edges
        correlate1d(im, dgauss_filter[1], 0, mode='constant', output=tmp)
        imx = correlate1d(tmp, dgauss_filter[0], 1,
                          mode='constant')  # INTERMEDIATE: im.shape

        # horizontal edges
        correlate1d(im, dgauss_filter[0], 0, mode='constant', output=tmp)
        imy = correlate1d(tmp, dgauss_filter[1], 1,
                          mode='constant')  # INTERMEDIATE: im.shape

        im_theta = arctan2(imy, imx, out=tmp)
        im_cos, im_sin = cos(im_theta), sin(im_theta, out=tmp)  # INTERMEDIATE: im.shape (cos)
        del im_theta, tmp

        imx *= imx
        imy *= imy
        imx += imy
        im_mag = sqrt(imx, out=imx)  # gradient magnitude
        del imx, imy  # cleanup 1 intermediate (imy)

        num_angles, num_bins, patch_sz = SIFT.__num_angles, SIFT.__num_bins, SIFT.__patch_sz
        im_orientation = orientations(im_mag,
                                      im_cos,
                                      im_sin,
                                      num_angles,
                                      nthreads=nthreads)
        del im_mag, im_cos, im_sin  # cleanup 3 intermediates

        def thread(_, start, stop):
            weight_x, weight_x_origin = SIFT.__weight_x, SIFT.__weight_x_origin
            tmp = empty_like(im)  # INTERMEDIATE: im.shape * nthreads
            for a in xrange(start, stop):
                #pylint: disable=unsubscriptable-object
                correlate1d(im_orientation[a, :, :],
                            weight_x,
                            0,
                            mode='constant',
                            origin=weight_x_origin,
                            output=tmp)
                correlate1d(tmp,
                            weight_x,
                            1,
                            mode='constant',
                            origin=weight_x_origin,
                            output=im_orientation[a, :, :])

        # OPT: these take about 12% of the time of SIFT (when single-threaded)
        run_threads(thread, num_angles)

        H, W = H - patch_sz + 2, W - patch_sz + 2
        if out is None:
            out = empty((num_bins * num_bins * num_angles, H, W),
                        dtype=im.dtype)

        # OPT: takes about 19% of time in SIFT (when single-threaded)
        sift_arr = out.reshape((num_bins * num_bins, num_angles, H, W))
        neighborhoods(sift_arr,
                      im_orientation,
                      num_bins,
                      patch_sz // num_bins,
                      nthreads=nthreads)
        #del im_orientation # cleanup large intermediate
        im_orientation = None  # cannot delete since it is used in a nested scope, this should work though

        # normalize SIFT descriptors
        # OPT: takes about 55% of time in SIFT (when single-threaded)
        normalize(out.reshape((num_bins * num_bins * num_angles, H * W)),
                  nthreads=nthreads)
        return out
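
The edge responses above come from a separable derivative-of-Gaussian pair: a smoothing kernel applied along one axis and the Gaussian's derivative along the other, after which gradient magnitude and orientation are formed as in the excerpt. SIFT.__dgauss_filter itself is not shown here, so the sketch below builds an equivalent pair from scratch; the sigma, kernel radius, and random test image are assumptions.

import numpy as np
from scipy.ndimage import correlate1d

# Illustrative separable derivative-of-Gaussian pair (sigma and radius are assumptions)
sigma, radius = 1.0, 4
x = np.arange(-radius, radius + 1, dtype=float)
g = np.exp(-x**2 / (2.0 * sigma**2))
g /= g.sum()                      # 1-D smoothing kernel
dg = -x / sigma**2 * g            # derivative of that Gaussian

im = np.random.rand(64, 64)

# smooth along axis 0, differentiate along axis 1 -> horizontal-gradient response
imx = correlate1d(correlate1d(im, g, axis=0, mode='constant'), dg, axis=1, mode='constant')
# smooth along axis 1, differentiate along axis 0 -> vertical-gradient response
imy = correlate1d(correlate1d(im, g, axis=1, mode='constant'), dg, axis=0, mode='constant')

im_mag = np.hypot(imx, imy)       # gradient magnitude, as in the excerpt
im_theta = np.arctan2(imy, imx)   # gradient orientation, as in the excerpt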
Esempio n. 47
0
    def filter(self, field3D):
        weights = np.ones(2 * self.filterHalf + 1, dtype=np.float32) / (2 * self.filterHalf + 1)
        result = correlate1d(field3D, weights, axis=0)
        result = correlate1d(result, weights, axis=1)
        return correlate1d(result, weights, axis=2)
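
The filter method above is a separable moving average: correlating with the same uniform weights along each of the three axes in turn is equivalent to a full 3-D box filter. Here is a standalone sketch under that reading; filter_half, the field shape, and the comparison against scipy's uniform_filter are illustrative choices, not part of the original class.

import numpy as np
from scipy.ndimage import correlate1d, uniform_filter

filter_half = 2
size = 2 * filter_half + 1
weights = np.ones(size, dtype=np.float32) / size

field = np.random.rand(16, 16, 16).astype(np.float32)

smoothed = correlate1d(field, weights, axis=0)
smoothed = correlate1d(smoothed, weights, axis=1)
smoothed = correlate1d(smoothed, weights, axis=2)

# The three separable passes match scipy's dedicated N-D box filter
# (both default to 'reflect' boundary handling).
print(np.allclose(smoothed, uniform_filter(field, size=size), atol=1e-4))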
Esempio n. 48
0
    def demosaic(self, input, filter=None):
        '''
        Demosaics a four channel Bayer image in RGBG ordering into a
        three channel RGB image.  Each Bayer color channel is upsampled;
        the green channels are averaged and the resulting frames are
        assembled into a single image.
        '''
#        for d in range(input.shape[2]):
#            imgutil.imageInfo(input[..., d], "input {0}".format(d))
        
        # build an image to hold the upsampled image data in RGBG format
        h, w = input.shape[0:2]
        channels = numpy.zeros((2 * h, 2 * w, 4), dtype=numpy.float32)
        
        if filter is None:
            # fill in image data in the appropriate spot based on Bayer pattern mask
            # determine the color filter ordering
            filterCint = ctypes.c_int(0) 
            videolib.FV_getColorFilter(self.fcd, ctypes.byref(filterCint))
            filter = filterCint.value
            
        if filter not in FILTERS_INV:
            print "FirewireVideo.demosaic: Warning, the color filter pattern from the camera was invalid ({0}), assuming BGRG".format(filter)
            filter = DC1394_COLOR_FILTER_GBRG            
                
        # reorder the split channels
        if filter == DC1394_COLOR_FILTER_RGGB:
            channels[0::2, 0::2, 0] = input[..., 0]
            channels[0::2, 1::2, 1] = input[..., 1]
            channels[1::2, 1::2, 2] = input[..., 2]
            channels[1::2, 0::2, 3] = input[..., 3]
        elif filter == DC1394_COLOR_FILTER_GBRG:
            channels[1::2, 0::2, 0] = input[..., 0]
            channels[0::2, 0::2, 1] = input[..., 1]
            channels[0::2, 1::2, 2] = input[..., 2]
            channels[1::2, 1::2, 3] = input[..., 3]
        elif filter == DC1394_COLOR_FILTER_GRBG:
            channels[0::2, 1::2, 0] = input[..., 0]
            channels[0::2, 0::2, 1] = input[..., 1]
            channels[1::2, 0::2, 2] = input[..., 2]
            channels[1::2, 1::2, 3] = input[..., 3]
        elif filter == DC1394_COLOR_FILTER_BGGR:
            channels[1::2, 1::2, 0] = input[..., 0]
            channels[0::2, 1::2, 1] = input[..., 1]
            channels[0::2, 0::2, 2] = input[..., 2]
            channels[1::2, 0::2, 3] = input[..., 3]
        
        # interpolate missing data values on each channel
        kernel = 1.0 / 8 * numpy.array([1, 4, 6, 4, 1])
        channels = filters.correlate1d(channels, kernel, 0, mode="constant")
        channels = filters.correlate1d(channels, kernel, 1, mode="constant")
                
        # build RGB result
        output = numpy.zeros((2 * h, 2 * w, 3), dtype=input.dtype)
        output[..., 0] = channels[..., 0]
        output[..., 2] = channels[..., 2]
        output[..., 1] = channels[..., numpy.r_[1, 3]].mean(2)

#        for d in range(output.shape[2]):
#            imgutil.imageInfo(output[..., d], "output {0}".format(d))
        
        return output
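
The interpolation step above works because correlating a zero-stuffed channel with the binomial kernel [1, 4, 6, 4, 1]/8 (which sums to 2, compensating for half the grid being empty) fills each gap with the average of its two neighbours and only lightly smooths the known samples. A 1-D illustration with a made-up signal:

import numpy as np
from scipy.ndimage import correlate1d

# Binomial interpolation kernel from the demosaic step above
kernel = np.array([1.0, 4.0, 6.0, 4.0, 1.0]) / 8.0

samples = np.array([1.0, 2.0, 4.0, 8.0])     # made-up 1-D "channel"
upsampled = np.zeros(2 * samples.size)
upsampled[0::2] = samples                    # zero-stuffed, like one Bayer channel

filled = correlate1d(upsampled, kernel, mode='constant')
# Gaps become the average of their neighbours; original positions are
# lightly smoothed with weights (1/8, 6/8, 1/8).
print(filled)   # [1.0, 1.5, 2.125, 3.0, 4.25, 6.0, 6.5, 4.0]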
Esempio n. 49
0
mask = ModelMask(0, 0, w, h)
mm = [{rex: mask, psf: mask}]
tractor.setModelMasks(mm)

plt.clf()
row = psfimg[:, psfw // 2] * flux
row = np.hstack((row, np.zeros(5)))
plt.plot(row, 'k.-')

from astrometry.util.miscutils import lanczos_filter
from scipy.ndimage.filters import correlate1d
for mux in [0.1, 0.25, 0.5]:
    L = 3
    Lx = lanczos_filter(L, np.arange(-L, L + 1) + mux)
    Lx /= Lx.sum()
    cx = correlate1d(row, Lx, mode='constant')
    plt.plot(cx, '.-')
plt.yscale('symlog', linthreshy=1e-10)
ps.savefig()

for re in [10., 1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-6, 1e-10]:
    #for re in [2e-3, 1.05e-3, 1e-3, 0.95e-3, 5e-4]:
    #for re in np.linspace(0.9e-3, 1.1e-3, 25):
    rex.pos.x = psf.pos.x = 12.
    rex.pos.y = psf.pos.y = 16.
    rex.shape.logre = np.log(re)
    print('Rex:', rex)
    cat[0] = rex
    rexmod = tractor.getModelImage(0)
    cat[0] = psf
    psfmod = tractor.getModelImage(0)
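
The loop above shifts a 1-D profile by a fractional pixel by correlating it with a Lanczos-3 kernel sampled at offset positions and renormalized to unit sum. Below is a self-contained sketch of the same idea with the Lanczos window written out in numpy rather than imported from astrometry.util.miscutils; the test profile is made up.

import numpy as np
from scipy.ndimage import correlate1d

def lanczos_kernel(L, x):
    """Lanczos-L window sampled at positions x; zero for |x| >= L."""
    out = np.sinc(x) * np.sinc(x / L)
    out[np.abs(x) >= L] = 0.0
    return out

row = np.exp(-0.5 * (np.arange(21.0) - 10.0)**2)    # toy profile peaking at pixel 10

L = 3
for mux in (0.1, 0.25, 0.5):
    Lx = lanczos_kernel(L, np.arange(-L, L + 1) + mux)
    Lx /= Lx.sum()                                  # preserve total flux
    shifted = correlate1d(row, Lx, mode='constant')
    print(mux, shifted.argmax(), shifted.sum())     # peak stays within half a pixel of 10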
Esempio n. 50
0
def getDeriv(image,
             weights=[3. / 16, 10. / 16, 3. / 16],
             axes=[0],
             mode="reflect",
             cval=0.0):
    """
    Calculates a first or second derivative on a multi-dimensional image
    array by the method of finite differences using a 3X3 stencil.
    Images in principle need not necessarily be 2D; i.e., 3D tomographic
    images should work also, however, "caveat emptor" as this latter
    functionality has not yet actually been tested fully.

    Parameters
    ----------

    image: array_like

        Array containing grayscale image data.  Only grayscale pixel
        values are supported--cannot handle 3-channel color.

    weights: array, optional

        1D sequence of three numbers representing the type of finite
        differences derivative (Prewitt, Sobel, Scharr, etc.) to compute.
        Defaults to [3./16, 10./16, 3./16], i.e., Scharr type.  It is
        recommended that this sequence should be normalized so that all
        components sum to 1.  If not, the function will still return a
        result, however, cross derivative (dxdy type) results will be
        scaled incorrectly with respect to 1st derivatives and non-cross
        2nd derivatives (e.g., dxdx, dydy).

    axes: scalar or array_like, optional

        Either a single value (1st derivative case) or two values (2nd
        derivative) indicating axes along which derivatives are to be
        taken.  Examples:

            axes=0         1st derivative, x-axis
            axes=[0]       Also indicates 1st derivative, x-axis
            axes=(1, 1)    2nd derivative, y-axis (i.e, dydy type)
            axes=[0, 2]    2nd derivative, x- and z-axes (i.e., dxdz,
                               assuming a tomographic style image with
                               three axes)

    mode: ('reflect', 'constant', 'nearest', 'mirror', 'wrap')

        Controls how edge pixels in the input image are treated.  See
        scipy.ndimage.filters.correlate1d() for details.

    cval: scalar, optional

        Only meaningful if mode is set to 'constant'.  See
        scipy.ndimage.filters.correlate1d() for details.

    Returns
    -------

    output: ndarray

        An estimate of first or second partial derivative with respect
        to image brightness at each pixel.

    """
    """Check and/or condition the input variables"""
    # Force treatment as float numpy array to avoid rounding errors later
    image = np.asarray(image, dtype=float)

    wmsg = 'weights input variable must be an array or list with ' + \
           'exactly three elements'
    try:
        nw = len(weights)  # Fails if weights is not iterable type
    except TypeError:
        raise TypeError(wmsg)
    if nw != 3:  # If weights is iterable, but still not correct length...
        raise ValueError(wmsg)
    """Set appropriate weights, and slightly reconfigure axes specification"""
    try:  # Assume axes input value is iterable
        nx = len(axes)  # Will raise a TypeError if axes is not iterable
    except TypeError:
        # First derivative
        wght = [-0.5, 0, 0.5]
        myaxes = [axes]  # Force myaxes to be iterable list containing one item
        nx = 0

    # Skip the rest, if axes input value was scalar (i.e., not iterable)
    if nx == 0:
        pass
    # Alternative first derivative, if axes input is iterable
    elif nx == 1:
        wght = [-0.5, 0, 0.5]
        myaxes = axes
    elif nx == 2:
        # Second derivative, along same axis twice
        if axes[0] == axes[1]:
            wght = [1.0, -2.0, 1.0]
            myaxes = [axes[0]]
        # Second derivative, along orthogonal axes
        else:
            wght = [-0.5, 0, 0.5]
            myaxes = axes
    else:
        raise ValueError('Too many axes: 3rd derivatives and higher are ' +
                         'not yet supported')
    """Compute the derivative!!!"""
    for ii in myaxes:
        # Use fast compiled code from scipy.ndimage._nd_image.pyd
        output = filters.correlate1d(image, wght, ii, mode=mode, cval=cval)
    """Apply smoothing weights (Prewitt, Sobel, Scharr, or whatever the
    user has selected) to all remaining axes"""
    # Get a list of all other axes.  For 2D images, this will produce either
    # a null list (in the dxdy case) or at most one other axis.  For 3D
    # images (e.g., such as a tomographic image), there will be either one
    # or two other axes.
    otheraxes = [ii for ii in range(image.ndim) if ii not in myaxes]
    for ii in otheraxes:
        output = filters.correlate1d(output, weights, ii, mode=mode, cval=cval)

    return output
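
A short usage sketch for getDeriv above on a quadratic toy image, where the exact first and mixed second derivatives are known. It assumes getDeriv is in scope; the image and the pixel picked for the printout are illustrative.

import numpy as np

# Quadratic toy image: img[i, j] = i * j, so d(img)/d(axis0) = j and the
# mixed second derivative d2(img)/d(axis0)d(axis1) = 1 everywhere
i, j = np.mgrid[0:16, 0:16].astype(float)
img = i * j

d0 = getDeriv(img, axes=0)         # Scharr-smoothed first derivative along axis 0
d01 = getDeriv(img, axes=[0, 1])   # mixed second derivative

print(d0[8, 8], d01[8, 8])         # 8.0 and 1.0 away from the image edges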
Esempio n. 53
0
    def _derivative2(self, input, axis, output=None, mode="reflect", cval=0.0):
        """Computes spatial derivative to get propagation."""
        return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0)
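
The [1, -2, 1] stencil used in _derivative2 above is the standard three-point second difference; on a quadratic profile it recovers the constant second derivative exactly away from the boundaries. A quick standalone check, assuming unit grid spacing:

import numpy as np
from scipy.ndimage import correlate1d

x = np.arange(10, dtype=float)
f = x**2                                   # f'' = 2 everywhere

d2 = correlate1d(f, [1, -2, 1], mode='reflect')
print(d2)   # 2.0 at interior points; the two end values differ because of the boundary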