Code example #1
def farid2_(input, axis=-1, output=None, mode="reflect", cval=0.0):
    """Calculate a size 5 Farid second derivative filter.
	Parameters
	----------
	%(input)s
	%(axis)s
	%(output)s
	%(mode)s
	%(cval)s
	"""
    input = np.asarray(input)
    axis = ndi._ni_support._check_axis(axis, input.ndim)
    output, return_value = ndi._ni_support._get_output(output, input)
    ndi.correlate1d(input, [0.232905, 0.002668, -0.471147, 0.002668, 0.232905],
                    axis, output, mode, cval, 0)
    axes = [ii for ii in range(input.ndim) if ii != axis]
    for ii in axes:
        ndi.correlate1d(
            output,
            [0.030320, 0.249724, 0.439911, 0.249724, 0.030320],
            ii,
            output,
            mode,
            cval,
            0,
        )
    return return_value
Code example #2
File: derivatives.py  Project: gianmatharu/Aniso
def compute_hessian(image):
    """ Compute Hessian components of 2D image using finite difference.

    Parameters
    ----------
    image: array_like
        Input image.

    Returns
    -------
    hxx, hxy, hyy: ndarray, ndim=2
        Second derivatives (Hessian components) along xx, xy and yy respectively.
    """

    image = np.asarray(image)

    if image.ndim != 2:
        raise ValueError('Image must be 2-dimensional!')

    hyy = correlate1d(image, [1, -2, 1], 0, mode='constant', cval=0.0)
    hxx = correlate1d(image, [1, -2, 1], 1, mode='constant', cval=0.0)

    imx = correlate1d(image, [-0.5, 0, 0.5], 1, mode='constant', cval=0.0)
    hxy = correlate1d(imx, [-0.5, 0, 0.5], 0, mode='constant', cval=0.0)

    return hxx, hxy, hyy
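As a quick sanity check on the [1, -2, 1] stencil used above, here is a minimal standalone sketch (assuming only NumPy and SciPy): the second derivative of a quadratic ramp is constant.

import numpy as np
from scipy.ndimage import correlate1d

# A quadratic ramp j**2 along the columns has a constant second derivative of 2.
img = np.tile(np.arange(8, dtype=float) ** 2, (5, 1))
hxx = correlate1d(img, [1, -2, 1], 1, mode='constant', cval=0.0)
print(hxx[:, 1:-1])   # interior values are all 2.0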
Code example #3
File: psf.py  Project: astroweaver/tractor
def lanczos_shift_image(img, dx, dy, inplace=False, force_python=False):
    from scipy.ndimage import correlate1d
    from astrometry.util.miscutils import lanczos_filter

    L = 3
    Lx = lanczos_filter(L, np.arange(-L, L + 1) + dx)
    Ly = lanczos_filter(L, np.arange(-L, L + 1) + dy)
    # Normalize the Lanczos interpolants (preserve flux)
    Lx /= Lx.sum()
    Ly /= Ly.sum()

    H, W = img.shape
    #print('lanczos_shift_image: size', W, H)
    #print('mp_fourier:', mp_fourier)
    if (mp_fourier is None or force_python or W <= 8 or H <= 8
            or H > work_corr7f.shape[0] or W > work_corr7f.shape[1]):
        sx = correlate1d(img, Lx, axis=1, mode='constant')
        outimg = correlate1d(sx, Ly, axis=0, mode='constant')
    else:
        assert (len(Lx) == 7)
        assert (len(Ly) == 7)
        if inplace:
            assert (img.dtype == np.float32)
            outimg = img
        else:
            outimg = np.empty(img.shape, np.float32)
            outimg[:, :] = img
        mp_fourier.correlate7f(outimg, Lx, Ly, work_corr7f)

    assert (np.all(np.isfinite(outimg)))
    return outimg
Code example #4
File: derivatives.py  Project: gianmatharu/Aniso
def compute_derivatives(image):
    """ Compute gradient of 2D image with centered finite difference approximation.

    Parameters
    ----------
    image: array_like
        Input image.

    Returns
    -------
    imx, imy: ndarray, ndim=2
        Gradients in x, y directions respectively.
    """

    image = np.asarray(image)

    if image.ndim != 2:
        raise ValueError('Image must be 2-dimensional!')

    # 2nd order stencil
    stencil = [-0.5, 0, 0.5]

    imx = correlate1d(image, stencil, 1, mode='constant', cval=0.0)
    imy = correlate1d(image, stencil, 0, mode='constant', cval=0.0)

    return imx, imy
Code example #5
def farid5(input, axis=-1, output=None, mode="reflect", cval=0.0):
    """Calculate a size 5 Farid first derivative filter.
	Parameters
	----------
	%(input)s
	%(axis)s
	%(output)s
	%(mode)s
	%(cval)s
	"""
    input = np.asarray(input)
    axis = ndi._ni_support._check_axis(axis, input.ndim)
    output, return_value = ndi._ni_support._get_output(output, input)
    ndi.correlate1d(input,
                    [-0.109604, -0.276691, 0.000000, 0.276691, 0.109604], axis,
                    output, mode, cval, 0)
    axes = [ii for ii in range(input.ndim) if ii != axis]
    for ii in axes:
        ndi.correlate1d(
            output,
            [0.037659, 0.249153, 0.426375, 0.249153, 0.037659],
            ii,
            output,
            mode,
            cval,
            0,
        )
    return return_value
Code example #6
def lanczos_shift_image(img, dx, dy, inplace=False, force_python=False):
    global mp_fourier
    if mp_fourier == -1:
        try:
            from tractor import mp_fourier
        except ImportError:
            print(
                'tractor.psf: failed to import C version of mp_fourier library.  Falling back to python version.'
            )
            mp_fourier = None

    H, W = img.shape
    if (mp_fourier is None or force_python or W <= 8 or H <= 8
            or H > work_corr7f.shape[0] or W > work_corr7f.shape[1]):
        # fallback to python:
        from scipy.ndimage import correlate1d
        from astrometry.util.miscutils import lanczos_filter
        L = 3
        Lx = lanczos_filter(L, np.arange(-L, L + 1) + dx)
        Ly = lanczos_filter(L, np.arange(-L, L + 1) + dy)
        # Normalize the Lanczos interpolants (preserve flux)
        Lx /= Lx.sum()
        Ly /= Ly.sum()
        sx = correlate1d(img, Lx, axis=1, mode='constant')
        outimg = correlate1d(sx, Ly, axis=0, mode='constant')
        return outimg

    outimg = np.empty(img.shape, np.float32)
    mp_fourier.lanczos_shift_3f(img, outimg, dx, dy, work_corr7f)
    # yuck!  (don't change this without ensuring the "restrict" keyword still applies
    # in lanczos_shift_3f!)
    if inplace:
        img[:, :] = outimg
    return outimg
Code example #7
def kroon3(input, axis=-1, output=None, mode="reflect", cval=0.0):
    """Calculate a size 3 Kroon first derivative filter.
	Parameters
	----------
	%(input)s
	%(axis)s
	%(output)s
	%(mode)s
	%(cval)s
	"""
    input = np.asarray(input)
    axis = ndi._ni_support._check_axis(axis, input.ndim)
    output, return_value = ndi._ni_support._get_output(output, input)
    ndi.correlate1d(input, [-0.5, 0, 0.5], axis, output, mode, cval, 0)
    axes = [ii for ii in range(input.ndim) if ii != axis]
    for ii in axes:
        ndi.correlate1d(
            output,
            [0.178947, 0.642105, 0.178947],
            ii,
            output,
            mode,
            cval,
            0,
        )
    return return_value
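The Farid and Kroon helpers above go through SciPy's private ndi._ni_support machinery. A minimal public-API sketch of the same separable pattern (derivative along one axis, smoothing along the others) is shown below; it is a rough equivalent under that assumption, not the snippets' exact plumbing.

import numpy as np
from scipy import ndimage

def kroon3_public(img, axis=-1, mode='reflect', cval=0.0):
    """Size-3 Kroon first derivative using only public scipy.ndimage calls."""
    img = np.asarray(img, dtype=float)
    # Central difference along the requested axis...
    deriv = ndimage.correlate1d(img, [-0.5, 0.0, 0.5], axis=axis, mode=mode, cval=cval)
    # ...then Kroon smoothing along every other axis.
    for ax in range(img.ndim):
        if ax != axis % img.ndim:
            deriv = ndimage.correlate1d(deriv, [0.178947, 0.642105, 0.178947],
                                        axis=ax, mode=mode, cval=cval)
    return deriv

print(kroon3_public(np.random.rand(5, 6)).shape)   # (5, 6)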
Code example #8
File: atten.py  Project: tooowzh/wradlib
def _sector_filter(mask, min_sector_size):
    """Calculate an array of same shape as mask, which is set to 1 in case of \
    at least min_sector_size adjacent values, otherwise it is set to 0.
    """

    kernela = np.ones([1] * (mask.ndim - 1) + [min_sector_size])
    kernelb = np.ones((min_sector_size, ))
    forward_origin = (-(min_sector_size - (min_sector_size // 2)) +
                      min_sector_size % 2)
    backward_origin = (min_sector_size - (min_sector_size // 2)) - 1
    forward_sum = ndimage.correlate1d(mask.astype(int),
                                      kernelb,
                                      axis=-1,
                                      mode='wrap',
                                      origin=forward_origin)
    backward_sum = ndimage.correlate1d(mask.astype(int),
                                       kernelb,
                                       axis=-1,
                                       mode='wrap',
                                       origin=backward_origin)
    forward_corners = (forward_sum == min_sector_size)
    backward_corners = (backward_sum == min_sector_size)
    forward_large_sectors = np.zeros_like(mask)
    backward_large_sectors = np.zeros_like(mask)
    for iii in range(mask.shape[0]):
        forward_large_sectors[iii] = ndimage.morphology.binary_dilation(
            forward_corners[iii], kernela[0],
            origin=forward_origin).astype(int)
        backward_large_sectors[iii] = ndimage.morphology.binary_dilation(
            backward_corners[iii], kernela[0],
            origin=backward_origin).astype(int)

    return (forward_large_sectors | backward_large_sectors)
Code example #9
File: extlib.py  Project: polashbora/aws_dl
def scharr3(input, axis=-1, output=None, mode="reflect", cval=0.0):
    """Full block first derivative along an axis using a 3 point Scharr filter.
	
	Applies a 3 point Scharr first derivative filter along an axis of the input
	array as per: 
	Scharr, 2005: Optimal derivative filter families for transparent motion
	estimation.
	
	Args:
		input: the array to be filtered.
		axis: optional, specifies the array axis to calculate the
			derivative. Default is the last axis.
		output: optional, an array to store the derivative filter output.
			Should be the same shape as the input array.
		mode: {'reflect', 'constant', 'nearest', 'mirror', 'wrap'} optional,
			specifies how the array boundaries are filtered. Default is
			'reflect'.
		cval: optional, specified value to pad input array if mode is 
			'constant'. Default is 0.0.
			
	Returns:
		derivative filtered array with same shape as input. Note for each array
		dimension indices 1:-1 will be free of boundary effects. 
	"""
    input = np.asarray(input)
    axis = ndi._ni_support._check_axis(axis, input.ndim)
    output = getOutput(output, input)
    ndi.correlate1d(input, [-0.5, 0, 0.5], axis, output, mode, cval, 0)
    axes = [ii for ii in range(input.ndim) if ii != axis]
    for ii in axes:
        ndi.correlate1d(output, [0.12026, 0.75948, 0.12026], ii, output, mode,
                        cval, 0)
    return output
Code example #10
File: convol.py  Project: Hypnus1803/FlowMapsGUI
def sconvol1d(arreglo,kernel=None,scale_factor=1.,fwhm=None,std=None):
	"""
	This program will smooth a 2D array, including the edges,
	with one-dimensional kernels. Problems of this kind arise when,
	e.g. an array is to be convolved with a 2D symmetric
	gaussian, which is separable into two one-dimensional
	convolutions.
	"""
	#~ s=len(arreglo.shape)
	dims = np.ndim(arreglo)
	rows = arreglo.shape[0]
	collumns = arreglo.shape[1]

	if dims != 2:
		raise ValueError('Array must be 2-dimensional')
		
	if kernel is None:
		if (fwhm is None) and (std is None):
			raise ValueError('Convolve with what?')
		if fwhm is not None:
			std = fwhm / (2. * math.sqrt(2. * math.log(2.)))
		width = int(std * 9.)
		if width % 2 == 0:
			width += 1
		kernel = np.arange(float(width)) - width // 2
		kernel = np.exp(-(kernel * kernel) / (2. * std * std))
		kernel = kernel / (std * math.sqrt(2. * math.pi))

	else:
		width=len(kernel)
		if width%2 == 0:
			raise ValueError('Dimension of kernel must be odd')
		
	big=np.empty([arreglo.shape[0]+width-1,arreglo.shape[1]+width-1])
	
	edge=int(width/2)
	big[edge:big.shape[0]-edge,edge:big.shape[1]-edge]=arreglo
	for i in range(0,edge):
		big[edge:big.shape[0]-edge,i]=arreglo[:,edge-1-i]
		big[edge:big.shape[0]-edge,arreglo.shape[1]+edge+i]=arreglo[:,arreglo.shape[1]-1-i]
	
	#~ big=convol1d(big,kernel,scale_factor)
	big = correlate1d(big,(kernel/scale_factor),mode="constant",cval=np.nan)
	big[np.isnan(big)]=0.0
	big=np.rot90(big,-1)
	for i in range(0,edge):
		big[:,i] = big[:,2*edge-1-i]
		big[:,arreglo.shape[0]+edge+i] = big[:,arreglo.shape[0]+edge-1-i]
	
	#~ big=convol1d(big,kernel,scale_factor)
	big = correlate1d(big,(kernel/scale_factor),mode="constant",cval=np.nan)
	big[np.isnan(big)]=0.0
	big=np.rot90(big,-3)
	big=big[edge:arreglo.shape[0]+edge,edge:arreglo.shape[1]+edge]

	return big
Code example #11
    def _perform_an_hyperplane_correlation(self, rollingZ, origin, destination,
                                           axis, kernel):
        """Perform a correlation of the internally stored hyperplane `rollingZ`.
See `prepare_hyperplane_operations` for the meaning of the parameters."""
        correlate1d(self._res[origin][rollingZ],
                    kernel,
                    axis=axis,
                    output=self._res[destination][rollingZ],
                    mode='constant')
Code example #12
def reduce(img):
    """
    Reduce operation on the input image: Apply filter then downsample
    """
    lp_filter = [1 / 16, 4 / 16, 6 / 16, 4 / 16, 1 / 16]

    filtered_img = ndimage.correlate1d(img, lp_filter, axis=0)
    filtered_img = ndimage.correlate1d(filtered_img, lp_filter, axis=1)

    H, W = img.shape

    # Downsample by keeping every second row (of H) and every second column (of W)
    return filtered_img[range(1, H, 2), :][:, range(1, W, 2)]
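For context, the snippet above is the REDUCE step of a Gaussian pyramid: separable 5-tap binomial smoothing followed by keeping every second row and column. A minimal standalone sketch (using slicing rather than range indexing, and assuming a 2-D float image) is:

import numpy as np
from scipy import ndimage

def reduce_once(img):
    """One Gaussian-pyramid REDUCE step: smooth separably, then downsample by 2."""
    lp = np.array([1, 4, 6, 4, 1], dtype=float) / 16.0
    smoothed = ndimage.correlate1d(img, lp, axis=0)
    smoothed = ndimage.correlate1d(smoothed, lp, axis=1)
    return smoothed[1::2, 1::2]

print(reduce_once(np.random.rand(64, 48)).shape)   # (32, 24)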
Code example #13
def test_correlate1d_complex(dtype_x, dtype_h, len_x, mode):
    x_cpu = np.arange(1, 1 + len_x, dtype=dtype_x)
    for len_h in range(1, 2 * len_x + 2):  # include cases for len_h > len_x
        h_cpu = np.arange(1, 1 + len_h, dtype=dtype_h)

        y = ndi.correlate1d(x_cpu.real, h_cpu.real, mode=mode, cval=0)
        y = y + 1j * ndi.correlate1d(x_cpu.imag, h_cpu.imag, mode=mode, cval=0)

        # compare against the CuPy-based correlate1d
        y3 = correlate1d(
            cp.asarray(x_cpu), cp.asarray(h_cpu), mode=mode, cval=0
        )
        cp.testing.assert_allclose(y, y3)
Code example #14
File: extlib.py  Project: polashbora/aws_dl
def _separableFilterSingle(input,
                           weights,
                           output=None,
                           mode="reflect",
                           cval=0.0):
    """Apply separable filter to the input trace buffer, single trace output.
	
	Applies the filter described by weights. The input is assumed to be a
	NxMxNS (N & M>=3 and odd) block of traces. The function returns the filter
	output only at the centre trace of the NxM block.
	
	Args:
		input: the array to be filtered - NxM (N & M>=3 and odd)block of traces.
		weights: array with filter weights for each dimension of input
		output: optional, a 1D array to store the derivative filter output.
			Should be the same length as the last dimension of the input.
		mode: {'reflect', 'constant', 'nearest', 'mirror', 'wrap'} optional,
			specifies how the array boundaries are filtered. Default is
			'reflect'. Only applies along the last axis of the input
		cval: optional, specified value to pad input array if mode is 
			'constant'. Default is 0.0.

	Returns:
		filtered 1D array with same length as last dimension of the input.
	"""
    input = np.asarray(input)
    inshape = input.shape
    output = getOutput(output, input, (inshape[-1]))

    if input.ndim == 2:
        W0 = weights[0]
        W1 = weights[1]
        n = np.int_(inshape[0] - W0.shape[0]) // 2
        use = input if n == 0 else input[n:-n, :]
        tmp0 = np.sum(W0[:, np.newaxis] * use, 0)
        ndi.correlate1d(tmp0, W1, -1, output, mode='reflect')
    elif input.ndim == 3:
        W0 = weights[0]
        W1 = weights[1]
        W2 = weights[2]
        n = np.int_(inshape[0] - W0.shape[0]) // 2
        m = np.int_(inshape[1] - W1.shape[0]) // 2
        use = input if n == 0 else input[n:-n, :, :]
        use = use if m == 0 else use[:, m:-m, :]
        tmp0 = np.sum(W0[:, np.newaxis, np.newaxis] * use, 0)
        tmp1 = np.sum(W1[:, np.newaxis] * tmp0, 0)
        ndi.correlate1d(tmp1, W2, -1, output, mode='reflect')
    return output
Code example #15
    def connect(self, input):
        """
        Connects parts of input array using this structuring element.

        Done by adding elements or array that 
        """

        if ((self.mode is None) or (self.mode == 'standard')
                or (self.mode == 'linear')):

            # standard or linear structuring element: generate and correlate
            se = self.generate()
            corr = ndimage.correlate(input, se, mode='constant')
            new = (corr > 1) | (input > 0)
            return new

        elif self.mode == '1d':
            if self.axis is None:

                # apply 1d structuring element along each axis
                new = (input > 0)
                se = self.generate()
                for axis in range(input.ndim):
                    corr = ndimage.correlate1d(input,
                                               se,
                                               axis=axis,
                                               mode='constant')
                    new = new | (corr > 1)
                return new

            else:

                # 1d structuring element along a given axis
                se = self.generate()
                corr = ndimage.correlate1d(input,
                                           se,
                                           axis=self.axis,
                                           mode='constant')
                new = (corr > 1) | (input > 0)
                return new

        # complain
        raise NotImplementedError("Sorry, don't know how to connect using " +
                                  "structuring element with " + "rank: " +
                                  str(self.rank) + ", mode: " +
                                  str(self.mode) + ", connectivity: " +
                                  str(self.connectivity) + " and size: " +
                                  str(self.size) + ".")
Code example #16
File: inversion.py  Project: afocusman/MIIC-library
def quantify_vchange_drop(vc_curve, time_win_len):
    """ Rolling interval (instead of point) derivative

    For each t0 it calculates the simple difference (x_t1 - x_t2), where t0
    spans the whole time series vc_curve and x_t1 and x_t2 are its averages
    over two time windows of length time_win_len symmetric with respect to t0.
    At the border the time series is reflected so that any long term trend
    should not be altered.

    :type vc_curve: :class:`~numpy.ndarray`
    :param vc_curve: The velocity change curve (when 2D one timeseries on each
        column)
    :type time_win_len: int
    :param time_win_len: Time window length (on each side)

    :rtype: :class:`~numpy.ndarray`
    :return: **vdrop** - Velocity change point drop on the chosen time basis
        (2*time_win_len days)
    """
    rolling_mask = np.ones((2 * time_win_len, ), dtype='f')
    rolling_mask[:time_win_len] = -1
    print(rolling_mask)
    vdrop = correlate1d(vc_curve, rolling_mask, axis=0, mode='reflect')\
         / time_win_len
    return vdrop
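The rolling mask above is a window of -1s followed by a window of +1s, so after dividing by time_win_len the correlation yields the mean of the trailing window minus the mean of the leading window. A small standalone check on a linear trend (slope 0.5 per sample, window length 3; a sketch, not part of the project):

import numpy as np
from scipy.ndimage import correlate1d

w = 3
mask = np.ones(2 * w, dtype='f')
mask[:w] = -1                                   # [-1, -1, -1, 1, 1, 1]

x = 0.5 * np.arange(20, dtype=float)            # linear trend, slope 0.5
drop = correlate1d(x, mask, axis=0, mode='reflect') / w
print(drop[w:-w])                               # interior values are all 1.5 (= slope * w)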
Code example #17
    def load_reference_pulse(path):
        file = np.loadtxt(path)
        print("Loaded reference pulse: {}".format(path))
        time_slice = 1E-9
        refx = file[:, 0]
        refy = file[:, 1]
        f = interpolate.interp1d(refx, refy, kind=3)
        max_sample = int(refx[-1] / time_slice)
        x = np.linspace(0, max_sample * time_slice, max_sample + 1)
        y = f(x)

        # Put pulse in center so result peak time matches with input peak
        pad = y.size - 2 * np.argmax(y)
        if pad > 0:
            y = np.pad(y, (pad, 0), mode='constant')
        else:
            y = np.pad(y, (0, -pad), mode='constant')

        # Create 1p.e. pulse shape
        y_1pe = y / np.trapz(y)

        # Make maximum of cc result == 1
        y = y / correlate1d(y_1pe, y).max()

        return y, y_1pe
Code example #18
def load_reference_pulse(path2):
    file = np.loadtxt(path2)
    print("Loaded reference pulse: {}".format(path2))
    time_slice = 1E-9
    refx = file[:, 0]
    refy = file[:, 1]
    f = interpolate.interp1d(refx, refy, kind=3)
    max_sample = int(refx[-1] / time_slice)
    x = np.linspace(0, max_sample * time_slice, max_sample + 1)
    y = f(x)
    #plt.plot(y)
    #plt.show()
    pad = y.size - 2 * np.argmax(
        y)  # Put pulse in center so result peak time matches with input peak
    if pad > 0:
        y = np.pad(y, (pad, 0), mode='constant')
    else:
        y = np.pad(y, (0, -pad), mode='constant')
    y_1pe = y / np.trapz(y)  # Create 1p.e. pulse shape
    #print('0', max(y), np.trapz(y))
    #print('1', max(y_1pe), np.trapz(y_1pe))
    y = y / correlate1d(y_1pe, y).max()  # Make maximum of cc result == 1
    #plt.plot(y_1pe, label='y_1pe')
    #plt.plot(y, label='y')
    #plt.legend(loc='best')
    #plt.show()
    return {'y': y, 'y_1pe': y_1pe}
Code example #19
File: displacement_field.py  Project: ofenlab/nipy
def square_gaussian_filter1d(input,
                             sigma,
                             axis=-1,
                             output=None,
                             mode="reflect",
                             cval=0.0):
    """One-dimensional Squared Gaussian filter.

    The standard-deviation of the Gaussian filter is given by
    sigma.
    """
    sd = float(sigma)
    # make the length of the filter equal to 4 times the standard
    # deviations:
    lw = int(4.0 * sd + 0.5)
    weights = [0.0] * (2 * lw + 1)
    weights[lw] = 1.0
    sum = 1.0
    sd = sd * sd
    # calculate the kernel:
    for ii in range(1, lw + 1):
        tmp = math.exp(-0.5 * float(ii * ii) / sd)
        weights[lw + ii] = tmp
        weights[lw - ii] = tmp
        sum += 2.0 * tmp
    for ii in range(2 * lw + 1):
        weights[ii] /= sum
        weights[ii] = weights[ii] ** 2
    return correlate1d(input, weights, axis, output, mode, cval, 0)
Code example #20
 def _prepare(self, waveforms):
     super()._prepare(waveforms)
     avg_wf = np.mean(self.waveforms, axis=0)
     self._peak_index = correlate1d(avg_wf,
                                    self.window,
                                    mode='constant',
                                    origin=self.origin).argmax()
Code example #21
File: inversion.py  Project: ftilmann/miic
def quantify_vchange_drop(vc_curve, time_win_len):
    """ Rolling interval (instead of point) derivative

    For each t0 it calculates the simple difference (x_t1 - x_t2), where t0
    spans the whole time series vc_curve and x_t1 and x_t2 are its averages
    over two time windows of length time_win_len symmetric with respect to t0.
    At the border the time series is reflected so that any long term trend
    should not be altered.

    :type vc_curve: :class:`~numpy.ndarray`
    :param vc_curve: The velocity change curve (when 2D one timeseries on each
        column)
    :type time_win_len: int
    :param time_win_len: Time window length (on each side)

    :rtype: :class:`~numpy.ndarray`
    :return: **vdrop** - Velocity change point drop on the chosen time basis
        (2*time_win_len days)
    """
    rolling_mask = np.ones((2 * time_win_len,), dtype='f')
    rolling_mask[:time_win_len] = -1
    print(rolling_mask)
    vdrop = correlate1d(vc_curve, rolling_mask, axis=0, mode='reflect')\
         / time_win_len
    return vdrop
Code example #22
 def extract(self, waveforms, peak_index):
     cc = correlate1d(waveforms,
                      self.cc_ref_y,
                      mode='constant',
                      origin=self.origin)
     charge, _, = extract_around_peak(cc, peak_index, 1, 0, 1)
     return charge
Code example #23
def orientationAssg(I: np.ndarray, g1: np.ndarray, keypts: list):
    Sx = np.array([-1, 0, 1])
    Sy = Sx.reshape(3, 1)
    Dx = ndimage.correlate1d(I, Sx, mode='nearest')
    Dy = ndimage.correlate(I, Sy, mode='nearest')
    nr, nc = I.shape
    L_mag = np.round(np.sqrt(Dx**2 + Dy**2), 2)
    L_ang = np.round(np.rad2deg(np.arctan(Dy / (Dx + np.finfo(float).eps))), 2)
    L_ang = np.where(Dx < 0, L_ang + 180, L_ang)
    L_ang = np.where(L_ang < 0, 360 + L_ang, L_ang)
    L_ang = np.round(L_ang / 45) * 45
    khist = np.empty(shape=(len(keypts), 8))
    l = 0
    #print(L_mag)
    for i in range(nr):
        for j in range(nc):
            if (i, j) in keypts:
                obin = np.zeros(shape=(8))
                for r in range(-2, 3):
                    for c in range(-2, 3):
                        if r + i < 0 or c + j < 0 or r + i >= nr or c + j >= nc:
                            continue
                        else:
                            obin[int(L_ang[r + i, c + j] //
                                     45)] += L_mag[r + i, c + j]
                khist[l] = obin
                l += 1
                if l >= len(keypts):
                    return khist
    return -1
Code example #24
 def _prepare(self, waveforms):
     WaveformReducer._prepare(self, waveforms)
     avg_wfs = self.neighbor_func(self.waveforms, self.neighbors, 0)
     self._peak_index = correlate1d(avg_wfs,
                                    self.window,
                                    mode='constant',
                                    origin=self.origin).argmax(1)
Code example #25
def test_correlate1d(dtype_x, dtype_h, len_x, mode):
    x_cpu = np.arange(1, 1 + len_x, dtype=dtype_x)
    for len_h in range(1, 2 * len_x + 2):  # include cases for len_h > len_x
        h_cpu = np.arange(1, 1 + len_h, dtype=dtype_h)
        min_origin = -(len_h // 2)
        max_origin = (len_h - 1) // 2

        for origin in range(min_origin, max_origin + 1):
            y = ndi.correlate1d(x_cpu, h_cpu, mode=mode, cval=0, origin=origin)

            # compare against the CuPy-based correlate1d
            y3 = correlate1d(
                cp.asarray(x_cpu),
                cp.asarray(h_cpu),
                mode=mode,
                cval=0,
                origin=origin,
            )
            cp.testing.assert_allclose(y, y3)

        for origin in [min_origin - 1, max_origin + 1]:
            with pytest.raises(ValueError):
                correlate1d(
                    cp.asarray(x_cpu),
                    cp.asarray(h_cpu),
                    mode=mode,
                    cval=0,
                    origin=origin,
                )
Code example #26
File: stlab3.py  Project: JavaUnchained/OnlineCourses
def gauss_filter(noisy_img, sigma):
    tr = 4.0
    radius = int(float(sigma) * tr + 0.5)
    weights = kern(sigma, radius)
    res = noisy_img
    for i in range(2):
        res = correlate1d(res, weights, i, mode='reflect')
    return res
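The snippet assumes a kern(sigma, radius) helper that is not shown. A standalone sketch with an explicit truncated Gaussian (the helper below is a hypothetical stand-in, not the project's own kern) reproduces SciPy's separable gaussian_filter:

import numpy as np
from scipy import ndimage

def gaussian_weights(sigma, radius):
    """Truncated, normalised Gaussian weights (stand-in for the snippet's kern())."""
    x = np.arange(-radius, radius + 1, dtype=float)
    w = np.exp(-0.5 * (x / sigma) ** 2)
    return w / w.sum()

img = np.random.rand(32, 32)
sigma = 2.0
weights = gaussian_weights(sigma, int(4.0 * sigma + 0.5))

res = img
for axis in range(2):
    res = ndimage.correlate1d(res, weights, axis, mode='reflect')

# Matches SciPy's own separable implementation (same truncation radius).
print(np.allclose(res, ndimage.gaussian_filter(img, sigma, mode='reflect')))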
Code example #27
 def _prepare(self, waveforms):
     super()._prepare(waveforms)
     self._cc = correlate1d(waveforms,
                            self.reference_pulse,
                            mode='constant',
                            origin=self.cc_origin)
     self._peak_index = self._cc.mean(0).argmax()
     self._charge = self._cc[:, self._peak_index]
Code example #28
def __generate_swts(im, filter_bank, nlevels=2, detail_magnitude=False):
    """
    Generate the SWTs for an image and a given set of filters across the given number of levels.
    They are generated with all-approximate first and the A-D, D-A, D-D (for 2D) and in the order of
    the levels. This yields the filtered image which is an ndarray that cannot be saved as it will
    be reused to generate the next sample. This also means that the results of the generator cannot
    simply be put in a list.
    """
    # pylint: disable=too-many-locals
    from itertools import islice, product
    from numpy import int64, abs, asarray, empty  # pylint: disable=redefined-builtin
    from scipy.ndimage import correlate1d

    temps = [im] + \
            [empty(im.shape, float if im.dtype.kind == 'f' else int64) for _ in range(im.ndim)]
    filter_bank = tuple(asarray(f, float) for f in filter_bank)

    for level in range(nlevels):
        last_filters = (None,)  # the last set of filters used to generate a sample
        has_detail = False  # first iteration below never has a detail filter
        for filters in product(filter_bank, repeat=im.ndim):
            # Generate the filtered image but only for the axes that are different
            start = __get_first_mismatch(filters, last_filters)
            for i, fltr in islice(enumerate(filters), start, None):
                correlate1d(temps[i],
                            fltr,
                            i,
                            origin=-1,
                            mode='wrap',
                            output=temps[i + 1])
            last_filters = filters  # save which filters we just used
            if has_detail and detail_magnitude:
                abs(temps[-1], temps[-1])
            if not has_detail and level + 1 != nlevels:
                # Complete approximate image is used for the next level
                next_im = temps[-1].copy()
            # Generate the SWT sample
            yield temps[-1]
            has_detail = True  # remaining samples in this level has at least one detail filter

        if level + 1 != nlevels:
            # Upsample the filters and set the full-approximate filtered image as the base
            filter_bank = __upsample_filters(filter_bank)
            temps[0] = next_im
Code example #29
    def backward(self, error_tensor):
        ## if the input is a 1-D array
        if np.ndim(error_tensor) == 1:
            output_Convolve = ndimage.correlate1d(error_tensor, self.filter,
                                                  mode='constant', cval=0)
            if self._optimizer is not None:
                self

        return
Code example #30
def kroon3( input, axis=-1, output=None, mode="reflect", cval=0.0):
	"""Calculate a size 3 Kroon first derivative filter.
	Parameters
	----------
	%(input)s
	%(axis)s
	%(output)s
	%(mode)s
	%(cval)s
	"""
	input = np.asarray(input)
	axis = ndi._ni_support._check_axis(axis, input.ndim)
	output, return_value = ndi._ni_support._get_output(output, input)
	ndi.correlate1d(input, [-0.5, 0, 0.5], axis, output, mode, cval, 0)
	axes = [ii for ii in range(input.ndim) if ii != axis]
	for ii in axes:
		ndi.correlate1d(output, [0.178947,0.642105,0.178947], ii, output, mode, cval, 0,)
	return return_value
Code example #31
def farid2_( input, axis=-1, output=None, mode="reflect", cval=0.0):
	"""Calculate a size 5 Farid second derivative filter.
	Parameters
	----------
	%(input)s
	%(axis)s
	%(output)s
	%(mode)s
	%(cval)s
	"""
	input = np.asarray(input)
	axis = ndi._ni_support._check_axis(axis, input.ndim)
	output, return_value = ndi._ni_support._get_output(output, input)
	ndi.correlate1d(input, [0.232905, 0.002668, -0.471147, 0.002668, 0.232905], axis, output, mode, cval, 0)
	axes = [ii for ii in range(input.ndim) if ii != axis]
	for ii in axes:
		ndi.correlate1d(output, [0.030320, 0.249724, 0.439911, 0.249724, 0.030320], ii, output, mode, cval, 0,)
	return return_value
Code example #32
def farid5( input, axis=-1, output=None, mode="reflect", cval=0.0):
	"""Calculate a size 5 Farid first derivative filter.
	Parameters
	----------
	%(input)s
	%(axis)s
	%(output)s
	%(mode)s
	%(cval)s
	"""
	input = np.asarray(input)
	axis = ndi._ni_support._check_axis(axis, input.ndim)
	output, return_value = ndi._ni_support._get_output(output, input)
	ndi.correlate1d(input, [-0.109604, -0.276691,  0.000000, 0.276691, 0.109604], axis, output, mode, cval, 0)
	axes = [ii for ii in range(input.ndim) if ii != axis]
	for ii in axes:
		ndi.correlate1d(output, [0.037659,  0.249153,  0.426375, 0.249153, 0.037659], ii, output, mode, cval, 0,)
	return return_value
Code example #33
    def prewitt(input, axis=-1, output=None, mode='reflect', cval=0.0):
        input = np.asarray(input)
        if axis < 0:
            axis += input.ndim
        if type(output) is not np.ndarray:
            output = np.zeros_like(input)

        kernel = list(range(1, size // 2 + 1))
        kernel = [-x for x in reversed(kernel)] + [0] + kernel
        smooth = np.ones(size, dtype=np.int32)
        smooth = smooth / np.abs(kernel).sum()
        smooth = list(smooth)

        ndimage.correlate1d(input, kernel, axis, output, mode, cval, 0)
        axes = [ii for ii in range(input.ndim) if ii != axis]
        for ii in axes:
            ndimage.correlate1d(output, smooth, ii, output, mode, cval, 0)
        return output
Code example #34
    def _prepare(self, waveforms):
        WaveformReducer._prepare(self, waveforms)
        self._cc = correlate1d(waveforms,
                               self.reference_pulse,
                               mode='constant',
                               origin=self.cc_origin)

        self._peak_index = self._cc.argmax(1)
        self._charge = self._cc[self._pa, self._peak_index]
Code example #35
 def _prepare(self, waveforms):
     WaveformReducer._prepare(self, waveforms)
     self._cc = correlate1d(waveforms,
                            self.reference_pulse,
                            mode='constant',
                            origin=self.cc_origin)
     avg_wfs = self.neighbor_func(self._cc, self.neighbors, 0)
     self._peak_index = avg_wfs.argmax(1)
     self._charge = self._cc[self._pa, self._peak_index]
Code example #36
File: calculus.py  Project: nbren12/gnl
def diff_to_right(x, **kwargs):
    mode = kwargs.pop('mode', 'wrap')

    if mode == 'neumann':
        kwargs['mode'] = 'constant'
        kwargs['cval'] = 0.0
    else:
        kwargs['mode'] = mode

    return correlate1d(x, [1, -1], origin=-1, **kwargs)
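A quick illustration of the kernel and origin choice above: [1, -1] with origin=-1 differences each element against its right-hand neighbour, and the wrap/constant modes only differ at the trailing edge (a minimal sketch, not from the gnl project):

import numpy as np
from scipy.ndimage import correlate1d

x = np.array([1.0, 2.0, 4.0, 7.0])

# Periodic boundary: the last entry is differenced against x[0].
print(correlate1d(x, [1, -1], origin=-1, mode='wrap'))                 # [-1. -2. -3.  6.]

# The 'neumann' branch above pads with zeros instead.
print(correlate1d(x, [1, -1], origin=-1, mode='constant', cval=0.0))   # [-1. -2. -3.  7.]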
Code example #37
File: convol.py  Project: gosaOAN/FlowMapsGUI
def convol1d(array,kernel,scale_factor=None):
	"""
	The convol1d function convolves an array with a kernel 1D, 
	and returns the result. Convolution is a general process 
	that can be used for various types of smoothing, signal 
	processing, shifting, differentiation, edge detection, etc.
	"""
	
	
	row = array.shape[0]
	column = array.shape[1]
	R = np.zeros([row,column])	
	m = len(kernel)
	if scale_factor is None:
		r = correlate1d(array, kernel)
	else:
		kernel = kernel / float(scale_factor)
		r = correlate1d(array, kernel)
	R[:, m // 2:column - int(math.ceil(m / 2.)) + 1] = r[:, m // 2:column - int(math.ceil(m / 2.)) + 1]
	
	return R
Code example #38
File: opencl_test2.py  Project: bjkomer/pyratslam
  def scipy_convolution( self ):
    """Performs the convolution without OpenCL, as a comparison"""

    im = self.im
    if self.larger_buffer:
      if self.dim == 1:
        im = self.im[self.offset:-self.offset]
      elif self.dim == 2:
        im = self.im[self.offset:-self.offset,self.offset:-self.offset]
      elif self.dim == 3:
        im = self.im[self.offset:-self.offset,self.offset:-self.offset,self.offset:-self.offset]
    
    tstart = time.time()
    
    if self.sep:
      if self.dim == 1:
        out = ndimage.correlate1d( input=im, weights=self.fil.tolist(), axis=0, mode='wrap', origin=0 )
      elif self.dim == 2:
        out = ndimage.correlate1d( input=im, weights=self.fil.tolist(), axis=0, mode='wrap', origin=0 )
        out = ndimage.correlate1d( input=out, weights=self.fil.tolist(), axis=1, mode='wrap', origin=0 )
      elif self.dim == 3:
        out = ndimage.correlate1d( input=im, weights=self.fil.tolist(), axis=0, mode='wrap', origin=0 )
        out = ndimage.correlate1d( input=out, weights=self.fil.tolist(), axis=1, mode='wrap', origin=0 )
        out = ndimage.correlate1d( input=out, weights=self.fil.tolist(), axis=2, mode='wrap', origin=0 )
    else:
      out = ndimage.correlate( im, self.fil, mode='wrap' )
    print "filtered scipy image", time.time() - tstart, "\n", out
    
    assert numpy.array_equal( out, self.out ), "The PyOpenCL result does not match with Scipy"
Code example #39
 def forwardDifferencesRGB(inputData):
     r = np.zeros(inputData.shape)
     g = np.zeros(inputData.shape)
     b = np.zeros(inputData.shape)
     ndimage.correlate1d(inputData, [-1, 1], origin=-1, axis=1, output=r)
     ndimage.correlate1d(inputData, [-1, 1], origin=-1, axis=0, output=g)
     ndimage.correlate1d(inputData, [-1, 1], origin=-1, axis=2, output=b)
     return GradientCalculation.normalize(np.concatenate((r[...,np.newaxis],g[...,np.newaxis],b[...,np.newaxis]),axis=3))
Code example #40
File: util.py  Project: dhomeier/pyspec
def cross_correlate(spectrum, template, mode='shift'):
    """
    Cross correlates a spectrum with a template spectrum
    
    
    Inputs
    ------
    
    spectrum    :   Spectrum to crosscorrelate against template. Must be a :class:`onedspec`
                    class object.
                    
    template    :   Template to crosscorrelate against spectrum. Must be a :class:`onedspec`
                    class object.
    
    mode        :   'shift' (default) returns the parameters of the best-fitting
                    Gaussian to the cross-correlation peak; 'spectrum' returns
                    the full cross-correlation as a :class:`onedspec` object.


    Outputs
    -------

    peak        :   Peak value of the cross-correlation profile found

    position    :   Position of the profile centroid (km/s)

    sigma       :   Gaussian sigma found for the best-fitting profile.
    
    """
    
    newWave = 3e5*(spectrum.wave - np.mean(spectrum.wave)) / np.mean(spectrum.wave)
    crossCorrelation = ndimage.correlate1d(spectrum.flux, template.interpolate(spectrum.wave).flux, mode='wrap')
    crossCorrelation -= np.median(crossCorrelation)
    
    peakPos = newWave[np.argmax(crossCorrelation)]
    peak = np.max(crossCorrelation)
    
    crossCorrSpectrum = onedspec(newWave, crossCorrelation, mode='waveflux')
    if mode == 'spectrum':
        return crossCorrSpectrum
    elif mode == 'shift':
        fitfunc = lambda p, x: p[0] * np.exp(-(x - p[1])**2 / (2.0 * p[2]**2))
        errfunc = lambda p, x, y: fitfunc(p, x) - y
        
        return optimize.leastsq(errfunc, (peak, peakPos, 1), args=(crossCorrSpectrum.wave, crossCorrSpectrum.flux))[0]
    else:
        raise NotImplementedError('Mode %s is not supported' % mode)
Code example #41
 def centralDifferencesRGB(inputData):
     r = np.zeros(inputData.shape)
     g = np.zeros(inputData.shape)
     b = np.zeros(inputData.shape)
     ndimage.correlate1d(inputData, [-1, 0, 1], axis=1, output=r)
     ndimage.correlate1d(inputData, [-1, 0, 1], axis=0, output=g)
     ndimage.correlate1d(inputData, [-1, 0, 1], axis=2, output=b)
     r = GradientCalculation.normalize_range(r, 0, 1)
     g = GradientCalculation.normalize_range(g, 0, 1)
     b = GradientCalculation.normalize_range(b, 0, 1) 
     return np.concatenate((r[...,np.newaxis],g[...,np.newaxis],b[...,np.newaxis]),axis=3)
Code example #42
File: diverse.py  Project: carichte/rexs
def lorentzian_filter1d(input, fwhm, axis = -1, output = None,
                      mode = "reflect", cval = 0.0):
    """One-dimensional Lorentzian filter.

    Parameters
    ----------
    %(input)s
    fwhm : scalar
        full width at half maximum of the Lorentzian kernel
    %(axis)s
    %(output)s
    %(mode)s
    %(cval)s
    """
    fwhm = abs(float(fwhm))
    #sd = fwhm/2.35482
    # make the length of the filter equal to 20 times the fwhm:
    lw = int(20.0 * fwhm + 0.5)
    weights = [0.0] * (2 * lw + 1)
    weights[lw] = 1.0
    sum = 1.0
    #sd = sd * sd
    # calculate the kernel:
    for ii in range(1, lw + 1):
        #tmp = math.exp(-0.5 * float(ii * ii) / sd)
        #tmp = 1./np.pi * fwhm / (fwhm * fwhm + ii * ii)
        tmp = 1. / (1. + ii * ii / (fwhm * fwhm))
        weights[lw + ii] = tmp
        weights[lw - ii] = tmp
        sum += 2.0 * tmp
    for ii in range(2 * lw + 1):
        weights[ii] /= sum
    return ndimage.correlate1d(input, weights, axis, output, mode, cval, 0)
Code example #43
def square_gaussian_filter1d(input, sigma, axis = -1, output = None, mode = "reflect", cval = 0.0):
    """One-dimensional Squared Gaussian filter.

    The standard-deviation of the Gaussian filter is given by
    sigma.
    """
    sd = float(sigma)
    # make the length of the filter equal to 4 times the standard
    # deviations:
    lw = int(4.0 * sd + 0.5)
    weights = [0.0] * (2 * lw + 1)
    weights[lw] = 1.0
    sum = 1.0
    sd = sd * sd
    # calculate the kernel:
    for ii in range(1, lw + 1):
        tmp = math.exp(- 0.5 * float(ii * ii) / sd)
        weights[lw + ii] = tmp
        weights[lw - ii] = tmp
        sum += 2.0 * tmp
    for ii in range(2 * lw + 1):
        weights[ii] /= sum
        weights[ii] = weights[ii] ** 2
    return correlate1d(input, weights, axis, output, mode, cval, 0)
Code example #44
 def run(self, workspace):
     #
     # Get the input and output image names. You need to get the .value
     # because otherwise you'll get the setting object instead of
     # the string name.
     #
     input_image_name = self.input_image_name.value
     output_image_name = self.output_image_name.value
     #
     # Get the image set. The image set has all of the images in it.
     #
     image_set = workspace.image_set
     #
     # Get the input image object. We want a grayscale image here.
     # The image set will convert a color image to a grayscale one
     # and warn the user.
     #
     input_image = image_set.get_image(input_image_name,
                                       must_be_grayscale = True)
     #
     # Get the pixels - these are a 2-d Numpy array.
     #
     pixels = input_image.pixel_data
     #
     # Get the smoothing parameter
     #
     if self.automatic_smoothing:
         # Pick the mode of the power spectrum - obviously this
         # is pretty hokey, not intended to really find a good number.
         #
         fft = np.fft.fft2(pixels)
         power2 = np.sqrt((fft * fft.conjugate()).real)
         mode = np.argwhere(power2 == power2.max())[0]
         scale = np.sqrt(np.sum((mode+.5)**2))
     else:
         scale = self.scale.value
     g = gaussian_gradient_magnitude(pixels, scale)
     if self.gradient_choice == GRADIENT_MAGNITUDE:
         output_pixels = g
     else:
         # Numpy uses i and j instead of x and y. The x axis is 1
         # and the y axis is 0
         x = correlate1d(g, [-1, 0, 1], 1)
         y = correlate1d(g, [-1, 0, 1], 0)
         norm = np.sqrt(x**2+y**2)
         if self.gradient_choice == GRADIENT_DIRECTION_X:
             output_pixels = .5 + x / norm / 2
         else:
             output_pixels = .5 + y / norm / 2
     #
     # Make an image object. It's nice if you tell CellProfiler
     # about the parent image - the child inherits the parent's
     # cropping and masking, but it's not absolutely necessary
     #
     output_image = cpi.Image(output_pixels, parent_image = input_image)
     image_set.add(output_image_name, output_image)
     #
     # Save intermediate results for display if the window frame is on
     #
     if self.show_window:
         workspace.display_data.input_pixels = pixels
         workspace.display_data.gradient = g
         workspace.display_data.output_pixels = output_pixels
Code example #45
File: curve.py  Project: leewalsh/square-tracking
def der(f, dx=None, x=None, xwidth=None, iwidth=None, order=1, min_scale=1):
    """ Take a finite derivative of f(x) using convolution with gaussian

    A function convolved with the derivative of a gaussian kernel gives the
    derivative of the function convolved with the integral of the kernel of a
    gaussian kernel. For any convolution:
        (f * g)' = f * g' = g * f'
    so we start with f and g', and return g and f', a smoothed derivative.

    Optionally can not smooth by giving width 0.

    parameters
    ----------
    f : an array to differentiate
    xwidth or iwidth : smoothing width (sigma) for gaussian.
        use iwidth for index units, (simple array index width)
        use xwidth for the physical units of x (x array is required)
        use 0 for no smoothing.
    x or dx : required for normalization
        if x is provided, dx = np.diff(x)
        otherwise, a scalar dx is presumed
        if dx=1, use a simple finite difference with np.diff
        if dx>1, convolves with the derivative of a gaussian, sigma=dx
    order : how many derivatives to take
    min_scale : the smallest physical scale involved in index units. e.g., fps.

    returns
    -------
    df_dx : the `order`th derivative of f with respect to x
    """
    if dx is None and x is None:
        dx = 1
    elif dx is None:
        dx = x.copy()
        dx[:-1] = dx[1:] - dx[:-1]
        assert dx[:-1].min() > 1e-6, ("Non-increasing independent variable "
                                      "(min step {})".format(dx[:-1].min()))
        dx[-1] = dx[-2]
        if np.allclose(dx, dx[0]):
            dx = dx[0]

    if xwidth is None and iwidth is None:
        if x is None:
            iwidth = 1
        else:
            xwidth = 1
    if iwidth is None:
        iwidth = xwidth / dx

    if iwidth == 0 or iwidth == 1:
        if order == 1:
            df = f.copy()
            df[:-1] = df[1:] - df[:-1]
            df[-1] = df[-2]
        else:
            df = np.diff(f, n=order)
            beg, end = order//2, (order+1)//2
            df = np.concatenate([[df[0]]*beg, df, [df[-1]]*end])
    elif iwidth == 2 and order == 1:
        return np.gradient(f, dx)
    else:
        from scipy.ndimage import correlate1d
        min_iwidth = 0.5
        if iwidth < min_iwidth:
            msg = "Width of {} too small for reliable results using {}"
            raise UserWarning(msg.format(iwidth, min_iwidth))
        # kernel truncated at truncate*iwidth; it is 4 by default
        truncate = np.clip(4, min_scale/iwidth, 100/iwidth)
        kern = gaussian_kernel(iwidth, order=order, truncate=truncate)
        # TODO: avoid spreading nans
        # correlate f(nan-->0) and norm by correlation of x * isfinite(f)
        # df = correlate1d(np.nan_to_num(f), kern, mode='nearest')
        # df /= correlate1d(x*np.isfinite(f).astype('f'), kern, mode='nearest')
        # but the above is not properly tested.
        df = correlate1d(f, kern, mode='nearest')

    return df/dx**order
Code example #46
File: guess.py  Project: neoKushan/vhs-teletext
 def update(self):
     self.convolved[self.low:self.high] = correlate1d(self._guess_scaler(self._guess_x[self.olow:self.ohigh]), _kernel)[_lk:-_lk]
Code example #47
File: dni.py  Project: franzmelchiori/dipy
 def derivative2(input, axis, output, mode, cval):
     return correlate1d(input, scale*np.array([1, -2, 1]), axis, output, mode, cval, 0)
Code example #48
File: environment.py  Project: naymen/MELA
 def gauss(self, a):
     a = np.asarray(a)
     axes = range(a.ndim)
     for axis in axes:
         ndimage.correlate1d(a, self.kernel, axis, a, self.mode)
Code example #49
def _gradient(input, d_mask=[-1,0,1]):
    dFun = lambda axis: correlate1d(input, d_mask, axis)
    return tuple(dFun(axis) for axis in range(input.ndim))
Code example #50
File: guess.py  Project: neoKushan/vhs-teletext
 def update_all(self):
     self.convolved[0:self._width] = correlate1d(self._guess_scaler(self._guess_x)[0:self._width], _kernel)
Code example #51
File: calculus.py  Project: nbren12/gnl
def average_to_left(x, **kwargs):
    return correlate1d(x, [.5, .5], origin=0, **kwargs)
Code example #52
File: guess.py  Project: neoKushan/vhs-teletext
 def update_cri(self, low, high):
     self.convolved[low:high] = correlate1d(self._guess_scaler(self._guess_x[low:high]), _kernel)
     self.mask[low:high] = self._mask_scaler(self._guess_x[low:high])
Code example #53
File: calculus.py  Project: nbren12/gnl
def _diff(x, axis=-1, mode='wrap'):
    return correlate1d(x, [-1, 1], axis=axis, mode=mode, origin=-1)