Example #1
    def __FilterData2D(self, data):
        mode = _ni_support._extend_mode_to_code("reflect")
        #lowpass filter to suppress noise
        #a = ndimage.gaussian_filter(data.astype('f4'), self.filterRadiusLowpass)
        #print data.shape

        data = data - data.mean()

        output, a = _ni_support._get_output(None, data)
        if self.PRIaxis == 'x':
            _nd_image.correlate1d(data, weightsLowpass, 0, output, mode, 0, 0)
            _nd_image.correlate1d(output, weightsHighpass, 1, output, mode, 0,
                                  0)
        else:
            _nd_image.correlate1d(data, weightsHighpass, 0, output, mode, 0, 0)
            _nd_image.correlate1d(output, weightsLowpass, 1, output, mode, 0,
                                  0)
        #print numpy.absolute(a - a_).mean()

        #lowpass filter again to find background
        #b = ndimage.gaussian_filter(a, self.filterRadiusHighpass)

        #output, b = _ni_support._get_output(None, data)
        #_nd_image.correlate1d(data, weightsHighpass, 0, output, mode, 0,0)
        #_nd_image.correlate1d(output, weightsHighpass, 1, output, mode, 0,0)

        return a  #- b
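
The two chained `correlate1d` calls implement a separable 2-D filter: the data is correlated with a 1-D kernel along axis 0 and the result is correlated again along axis 1, with the low-pass and high-pass kernels swapped between the axes depending on `self.PRIaxis`. A minimal sketch of the same pattern using only the public `scipy.ndimage` API is shown below; the 5-tap box kernel is a hypothetical stand-in for the module-level `weightsLowpass`/`weightsHighpass` arrays, which the snippet does not define, and a single kernel is used on both axes for brevity.

import numpy as np
from scipy import ndimage

def separable_filter_2d(data, weights):
    # subtract the mean first, as the snippet does
    data = data - data.mean()
    a = ndimage.correlate1d(data, weights, axis=0, mode='reflect')
    a = ndimage.correlate1d(a, weights, axis=1, mode='reflect')
    return a

weights = np.ones(5) / 5.0                      # hypothetical stand-in kernel
image = np.random.rand(64, 64).astype('f4')
filtered = separable_filter_2d(image, weights)
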
Example #2
    def __FilterData2D(self, data):
        mode = _ni_support._extend_mode_to_code("reflect")
        #lowpass filter to suppress noise
        #a = ndimage.gaussian_filter(data.astype('f4'), self.filterRadiusLowpass)
        #print data.shape
        
        data = data - data.mean()

        output, a = _ni_support._get_output(None, data)
        if self.PRIaxis == 'x':
            _nd_image.correlate1d(data, weightsLowpass, 0, output, mode, 0, 0)
            _nd_image.correlate1d(output, weightsHighpass, 1, output, mode, 0, 0)
        else:
            _nd_image.correlate1d(data, weightsHighpass, 0, output, mode, 0, 0)
            _nd_image.correlate1d(output, weightsLowpass, 1, output, mode, 0, 0)
        #print numpy.absolute(a - a_).mean()

        #lowpass filter again to find background
        #b = ndimage.gaussian_filter(a, self.filterRadiusHighpass)

        #output, b = _ni_support._get_output(None, data)
        #_nd_image.correlate1d(data, weightsHighpass, 0, output, mode, 0,0)
        #_nd_image.correlate1d(output, weightsHighpass, 1, output, mode, 0,0)

        return a #- b
Example #3
def _correlate_or_convolve(input, weights, output, mode, cval, origin,
                           convolution):
    input = np.asarray(input)
    if np.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    origins = _ni_support._normalize_sequence(origin, input.ndim)
    weights = np.asarray(weights, dtype=np.float64)
    wshape = [ii for ii in weights.shape if ii > 0]
    if len(wshape) != input.ndim:
        raise RuntimeError('filter weights array has incorrect shape.')
    if convolution:
        weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
        for ii in range(len(origins)):
            origins[ii] = -origins[ii]
            if not weights.shape[ii] & 1:
                origins[ii] -= 1
    for origin, lenw in zip(origins, wshape):
        if _invalid_origin(origin, lenw):
            raise ValueError('Invalid origin; origin must satisfy '
                             '-(weights.shape[k] // 2) <= origin[k] <= '
                             '(weights.shape[k]-1) // 2')

    if not weights.flags.contiguous:
        weights = weights.copy()
    output = _ni_support._get_output(output, input)
    mode = _ni_support._extend_mode_to_code(mode)
    _nd_image.correlate(input, weights, output, mode, cval, origins)
    return output
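
In the `convolution` branch above, the weights are reversed along every axis and the origins are negated (with an extra shift for even-sized axes); that is the standard relationship between convolution and correlation. A quick check with the public API, assuming an odd-sized kernel so no origin adjustment is needed:

import numpy as np
from scipy import ndimage

x = np.random.rand(8, 8)
w = np.arange(9, dtype=float).reshape(3, 3)    # odd-sized kernel: no origin shift required

conv = ndimage.convolve(x, w, mode='reflect')
corr = ndimage.correlate(x, w[::-1, ::-1], mode='reflect')   # correlate with the flipped kernel

assert np.allclose(conv, corr)
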
Example #4
def correlate1d(input, weights, axis=-1, output=None, mode="reflect",
                cval=0.0, origin=0):
    """Calculate a one-dimensional correlation along the given axis.

    The lines of the array along the given axis are correlated with the
    given weights.

    Parameters
    ----------
    %(input)s
    weights : array
        One-dimensional sequence of numbers.
    %(axis)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s
    """
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    output, return_value = _ni_support._get_output(output, input)
    weights = numpy.asarray(weights, dtype=numpy.float64)
    if weights.ndim != 1 or weights.shape[0] < 1:
        raise RuntimeError('no filter weights given')
    if not weights.flags.contiguous:
        weights = weights.copy()
    axis = _ni_support._check_axis(axis, input.ndim)
    if ((len(weights) // 2 + origin < 0) or
        (len(weights) // 2 + origin > len(weights))):
        raise ValueError('invalid origin')
    mode = _ni_support._extend_mode_to_code(mode)
    _nd_image.correlate1d(input, weights, axis, output, mode, cval,
                          origin)
    return return_value
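
A short usage example for the public `scipy.ndimage.correlate1d` wrapper shown above. The expected result for the default 'reflect' boundary mode (the input is extended as d c b a | a b c d) is worked out by hand and left as a comment:

import numpy as np
from scipy import ndimage

x = np.array([1, 2, 3, 4, 5], dtype=float)
w = [1, 1, 1]                 # 3-tap moving sum

y = ndimage.correlate1d(x, w, mode='reflect')
# 'reflect' extends x to [1, 1, 2, 3, 4, 5, 5],
# so y should be [4., 6., 9., 12., 14.]
print(y)
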
Example #5
    def __FilterDataFast(self):
        data = self.data[:,:,0]
        mode = _ni_support._extend_mode_to_code("reflect")
        # lowpass filter to suppress noise
        output, a = _ni_support._get_output(None, data)
        _nd_image.correlate1d(data, ofdP.weightsLowpass, 0, output, mode, 0, 0)
        _nd_image.correlate1d(output, ofdP.weightsLowpass, 1, output, mode, 0, 0)

        # lowpass filter again to find background
        output, b = _ni_support._get_output(None, data)
        _nd_image.correlate1d(data, ofdP.weightsHighpass, 0, output, mode, 0, 0)
        _nd_image.correlate1d(output, ofdP.weightsHighpass, 1, output, mode, 0, 0)

        return a - b
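
This method returns a band-pass image: `a` is the data smoothed with the narrow `weightsLowpass` kernel (noise suppression), `b` is the data smoothed with `weightsHighpass` (used here, per the comment, as a wider low-pass that estimates the background), and the difference removes the slowly varying background. The commented-out lines in the other snippets suggest the kernels are Gaussian, so a minimal difference-of-Gaussians sketch with the public API might look like the following; the two radii are assumed values standing in for the filterRadiusLowpass/filterRadiusHighpass attributes:

import numpy as np
from scipy import ndimage

def band_pass(data, radius_lowpass=1.0, radius_highpass=3.0):
    # narrow Gaussian to suppress noise
    a = ndimage.gaussian_filter(data.astype('f4'), radius_lowpass)
    # wider Gaussian to estimate the slowly varying background
    b = ndimage.gaussian_filter(data.astype('f4'), radius_highpass)
    return a - b

filtered = band_pass(np.random.rand(128, 128))
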
Example #6
    def __FilterDataFast(self):
        data = self.data[:, :, 0]
        mode = _ni_support._extend_mode_to_code("reflect")
        # lowpass filter to suppress noise
        output, a = _ni_support._get_output(None, data)
        _nd_image.correlate1d(data, ofdP.weightsLowpass, 0, output, mode, 0, 0)
        _nd_image.correlate1d(output, ofdP.weightsLowpass, 1, output, mode, 0,
                              0)

        # lowpass filter again to find background
        output, b = _ni_support._get_output(None, data)
        _nd_image.correlate1d(data, ofdP.weightsHighpass, 0, output, mode, 0,
                              0)
        _nd_image.correlate1d(output, ofdP.weightsHighpass, 1, output, mode, 0,
                              0)

        return a - b
Example #7
    def Afunc(self, f):
        """Forward transform - convolve with the PSF"""
        fs = np.reshape(f, (self.height, self.width, self.depth))

        #d = ndimage.gaussian_filter(fs, self.sigma)
        mode = _ni_support._extend_mode_to_code("reflect")
        #lowpass filter to suppress noise
        #a = ndimage.gaussian_filter(data.astype('f4'), self.filterRadiusLowpass)
        #print data.shape

        output, a = _ni_support._get_output(None, fs)
        _nd_image.correlate1d(fs, self.kernel, 0, output, mode, 0, 0)
        _nd_image.correlate1d(output, self.kernel, 1, output, mode, 0, 0)

        #ndimage.uniform_filter(output, self.oversamp, output=output)

        #d = real(d);
        return np.ravel(output)  #[::oversamp,::oversamp,:])
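
`Afunc` is the forward operator of a deconvolution solver: the flat vector `f` is reshaped onto the image grid, correlated with a separable PSF kernel along the first two axes, and flattened again. A hedged sketch of the same operator on top of the public API; the Gaussian-shaped `kernel` is an assumed stand-in for `self.kernel`, which is not defined in the snippet:

import numpy as np
from scipy import ndimage

def forward_psf(f, shape, kernel):
    # reshape the flat vector, filter along axes 0 and 1, flatten again
    fs = np.reshape(f, shape)
    out = ndimage.correlate1d(fs, kernel, axis=0, mode='reflect')
    out = ndimage.correlate1d(out, kernel, axis=1, mode='reflect')
    return np.ravel(out)

kernel = np.exp(-0.5 * (np.arange(-3, 4) / 1.5) ** 2)   # assumed 7-tap PSF cross-section
kernel /= kernel.sum()
f = np.random.rand(32 * 32 * 4)
g = forward_psf(f, (32, 32, 4), kernel)
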
Example #8
    def Afunc(self, f):
        '''Forward transform - convolve with the PSF'''
        fs = reshape(f, (self.height, self.width, self.depth))

        #d = ndimage.gaussian_filter(fs, self.sigma)
        mode = _ni_support._extend_mode_to_code("reflect")
        #lowpass filter to suppress noise
        #a = ndimage.gaussian_filter(data.astype('f4'), self.filterRadiusLowpass)
        #print data.shape

        output, a = _ni_support._get_output(None, fs)
        _nd_image.correlate1d(fs, self.kernel, 0, output, mode, 0, 0)
        _nd_image.correlate1d(output, self.kernel, 1, output, mode, 0, 0)
        
        #ndimage.uniform_filter(output, self.oversamp, output=output)

        #d = real(d);
        return ravel(output)#[::oversamp,::oversamp,:])
Example #9
    def __FilterData2D(self, data):
        mode = _ni_support._extend_mode_to_code("reflect")
        #lowpass filter to suppress noise
        #a = ndimage.gaussian_filter(data.astype('f4'), self.filterRadiusLowpass)
        #print data.shape

        #output, a = _ni_support._get_output(None, data)
        a = np.zeros(data.shape, dtype=data.dtype.name)
        _nd_image.correlate1d(data, weightsLowpass, 0, a, mode, 0, 0)
        _nd_image.correlate1d(a, weightsLowpass, 1, a, mode, 0, 0)

        #print np.absolute(a - a_).mean()

        #lowpass filter again to find background
        #b = ndimage.gaussian_filter(a, self.filterRadiusHighpass)

        b = np.zeros(data.shape, dtype=data.dtype.name)
        _nd_image.correlate1d(data, weightsHighpass, 0, b, mode, 0, 0)
        _nd_image.correlate1d(b, weightsHighpass, 1, b, mode, 0, 0)

        return a - b
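
Unlike the variants above, this snippet allocates its own result arrays with `np.zeros` rather than going through `_ni_support._get_output`. The public `correlate1d` accepts a pre-allocated array through its `output` parameter in much the same way; a small sketch, with an assumed box kernel:

import numpy as np
from scipy import ndimage

data = np.random.rand(128, 128)
weights = np.ones(5) / 5.0            # assumed stand-in for weightsLowpass

a = np.zeros_like(data)
ndimage.correlate1d(data, weights, axis=0, output=a, mode='reflect')
b = np.zeros_like(data)
ndimage.correlate1d(a, weights, axis=1, output=b, mode='reflect')
# b now holds the separably filtered image
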
Example #10
def map_coordinates_parallel(input, coordinates, output=None, order=3, mode='constant', cval=0.0,
                             prefilter=True, chunklen=None, threads=None):
    """

Parallelized version of `scipy.ndimage.map_coordinates`.

`scipy.ndimage.map_coordinates` is slow for large datasets. A speed improvement can be
achieved by

     * Splitting the data into chunks
     * Performing the transformation of chunks in parallel

    New parameters:

    chunklen: Size of the chunks in pixels per axis. Default: None
        Special values:
            None: Automatic (Chooses a default based on number of dimensions)
            0: Do not split data into chunks. (implicitly sets threads==1)

    threads: Number of threads. Default: None
        None: Automatic (One thread per available processing unit)

    """

    # this part is taken without change from scipy's serial implementation
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    input = np.asarray(input)
    if np.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    coordinates = np.asarray(coordinates)
    if np.iscomplexobj(coordinates):
        raise TypeError('Complex type not supported')
    output_shape = coordinates.shape[1:]
    if input.ndim < 1 or len(output_shape) < 1:
        raise RuntimeError('input and output rank must be > 0')
    if coordinates.shape[0] != input.ndim:
        raise RuntimeError('invalid shape for coordinate array')
    mode = _ni_support._extend_mode_to_code(mode)
    if prefilter and order > 1:
        filtered = spline_filter(input, order, output=np.float64)
    else:
        filtered = input

    # return value of `_ni_support._get_output` changed between scipy versions, code here is
    # adapted to work with both
    output = _ni_support._get_output(output, input, shape=output_shape)
    retval = output
    if isinstance(output, tuple):
        output, retval = output

    # below here there is the new code for splitting into chunks and parallel execution
    if chunklen is None:
        # set defaults
        chunklen = 128
        if output.ndim < 3:
            chunklen = 1024

    def chunk_arguments(filtered, coordinates, output):
        chunks = []
        for axis in range(output.ndim):
            chunkstarts = np.arange(0, output.shape[axis], chunklen)
            chunkends = chunkstarts + chunklen
            chunkends[-1] = output.shape[axis]
            chunks.append([slice(start, stop) for start, stop in zip(chunkstarts, chunkends)])

        for chunk in itertools.product(*chunks):
            sub_coordinates = coordinates[(slice(None),) + chunk].copy()
            filtered_region = []
            for in_axis in range(filtered.ndim):
                c = sub_coordinates[in_axis, ...]
                cmin = max(0, int(np.floor(np.min(c)))-5)
                cmax = min(filtered.shape[in_axis], int(np.ceil(np.max(c)))+5)
                sub_coordinates[in_axis, ...] -= cmin
                filtered_region.append(slice(cmin, cmax))
            sub_filtered = filtered[tuple(filtered_region)]
            sub_output = output[chunk]

            yield (sub_filtered, sub_coordinates, sub_output)

    def map_coordinates_chunk(arg):
        sub_filtered, sub_coordinates, sub_output = arg
        _nd_image.geometric_transform(sub_filtered, None, sub_coordinates, None, None,
                                      sub_output, order, mode, cval, None, None)

    if chunklen > 0:
        list_of_chunk_args = list(chunk_arguments(filtered, coordinates, output))
    else:
        list_of_chunk_args = [(filtered, coordinates, output)]

    if len(list_of_chunk_args) == 1:
        threads = 1

    if threads != 1:
        threadpool = ThreadPoolExecutor(threads)
        my_map = threadpool.map
    else:
        my_map = map

    # execution happens here
    list(my_map(map_coordinates_chunk, list_of_chunk_args))

    if threads != 1:
        if have_concurrent_futures:
            threadpool.shutdown()
        else:
            threadpool.close()
            threadpool.join()

    return retval
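
A usage sketch for `map_coordinates_parallel`, assuming the function above is in scope: as with `scipy.ndimage.map_coordinates`, the coordinates array must have one leading entry per input dimension. The volume size, the sub-pixel shift, the chunk length and the thread count below are arbitrary illustration values.

import numpy as np

volume = np.random.rand(64, 64, 64)
coords = np.mgrid[0:64, 0:64, 0:64].astype(np.float64)   # shape (3, 64, 64, 64)
coords[2] += 0.5                                          # half-pixel shift along the last axis

warped = map_coordinates_parallel(volume, coords, order=1,
                                  chunklen=32, threads=4)
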
Example #11
def map_coordinates_parallel(input,
                             coordinates,
                             output=None,
                             order=3,
                             mode='constant',
                             cval=0.0,
                             prefilter=True,
                             chunklen=None,
                             threads=None):
    """

Parallelized version of `scipy.ndimage.map_coordinates`.

`scipy.ndimage.map_coordinates` is slow for large datasets. A speed improvement can be
achieved by

     * Splitting the data into chunks
     * Performing the transformation of chunks in parallel

    New parameters:

    chunklen: Size of the chunks in pixels per axis. Default: None
        Special values:
            None: Automatic (Chooses a default based on number of dimensions)
            0: Do not split data into chunks. (implicitly sets threads==1)

    threads: Number of threads. Default: None
        None: Automatic (One thread per available processing unit)

    """

    # this part is taken without change from scipy's serial implementation
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    input = np.asarray(input)
    if np.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    coordinates = np.asarray(coordinates)
    if np.iscomplexobj(coordinates):
        raise TypeError('Complex type not supported')
    output_shape = coordinates.shape[1:]
    if input.ndim < 1 or len(output_shape) < 1:
        raise RuntimeError('input and output rank must be > 0')
    if coordinates.shape[0] != input.ndim:
        raise RuntimeError('invalid shape for coordinate array')
    mode = _ni_support._extend_mode_to_code(mode)
    if prefilter and order > 1:
        filtered = spline_filter(input, order, output=np.float64)
    else:
        filtered = input

    # return value of `_ni_support._get_output` changed between scipy versions, code here is
    # adapted to work with both
    output = _ni_support._get_output(output, input, shape=output_shape)
    retval = output
    if isinstance(output, tuple):
        output, retval = output

    # below here there is the new code for splitting into chunks and parallel execution
    if chunklen is None:
        # set defaults
        chunklen = 128
        if output.ndim < 3:
            chunklen = 1024

    def chunk_arguments(filtered, coordinates, output):
        chunks = []
        for axis in range(output.ndim):
            chunkstarts = np.arange(0, output.shape[axis], chunklen)
            chunkends = chunkstarts + chunklen
            chunkends[-1] = output.shape[axis]
            chunks.append([
                slice(start, stop)
                for start, stop in zip(chunkstarts, chunkends)
            ])

        for chunk in itertools.product(*chunks):
            sub_coordinates = coordinates[(slice(None), ) + chunk].copy()
            filtered_region = []
            for in_axis in range(filtered.ndim):
                c = sub_coordinates[in_axis, ...]
                cmin = max(0, int(np.floor(np.min(c))) - 5)
                cmax = min(filtered.shape[in_axis],
                           int(np.ceil(np.max(c))) + 5)
                sub_coordinates[in_axis, ...] -= cmin
                filtered_region.append(slice(cmin, cmax))
            sub_filtered = filtered[tuple(filtered_region)]
            sub_output = output[chunk]

            yield (sub_filtered, sub_coordinates, sub_output)

    def map_coordinates_chunk(arg):
        sub_filtered, sub_coordinates, sub_output = arg
        _nd_image.geometric_transform(sub_filtered, None, sub_coordinates,
                                      None, None, sub_output, order, mode,
                                      cval, None, None)

    if chunklen > 0:
        list_of_chunk_args = list(
            chunk_arguments(filtered, coordinates, output))
    else:
        list_of_chunk_args = [(filtered, coordinates, output)]

    if len(list_of_chunk_args) == 1:
        threads = 1

    if threads != 1:
        threadpool = ThreadPoolExecutor(threads)
        my_map = threadpool.map
    else:
        my_map = map

    # execution happens here
    list(my_map(map_coordinates_chunk, list_of_chunk_args))

    if threads != 1:
        if have_concurrent_futures:
            threadpool.shutdown()
        else:
            threadpool.close()
            threadpool.join()

    return retval
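
The comment about `_ni_support._get_output` refers to a real API change: older SciPy versions returned an (output, return_value) tuple, while newer ones return only the output array. A hedged compatibility wrapper in the spirit of the in-line handling above; the helper name is invented for illustration, and `_ni_support` remains a private SciPy module whose location and signature may change between versions:

from scipy.ndimage import _ni_support

def _get_output_compat(output, input, shape=None):
    # normalize both conventions to an (output_array, return_value) pair
    result = _ni_support._get_output(output, input, shape=shape)
    if isinstance(result, tuple):      # old SciPy: (output, return_value)
        return result
    return result, result              # newer SciPy: just the output array
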