Example #1
def test_extra_properties_intensity():
    region = regionprops(
        SAMPLE,
        intensity_image=INTENSITY_SAMPLE,
        extra_properties=(median_intensity, ),
    )[0]
    assert region.median_intensity == cp.median(INTENSITY_SAMPLE[SAMPLE == 1])
Example #2
def test_extra_properties_mixed():
    # mixed properties, with and without intensity
    region = regionprops(SAMPLE,
                         intensity_image=INTENSITY_SAMPLE,
                         extra_properties=(median_intensity, pixelcount))[0]
    assert region.median_intensity == cp.median(INTENSITY_SAMPLE[SAMPLE == 1])
    assert region.pixelcount == cp.sum(SAMPLE == 1)
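test_extra_properties_mixed also references a pixelcount helper that is not shown in this listing. A minimal sketch of what it could look like, following the same regionprops extra_properties convention as median_intensity in Example #6 (a one-argument property receives only the region mask):

def pixelcount(regionmask):
    # Hypothetical mask-only extra property: number of pixels in the region.
    return cp.sum(regionmask)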
Example #3
def test_image_shape():
    """Test that shape of output image in deconvolution is same as input.

    This addresses issue #1172.
    """
    point = cp.zeros((5, 5), dtype=cp.float64)  # np.float was removed from NumPy
    point[2, 2] = 1.0
    psf = ndi.gaussian_filter(point, sigma=1.0)
    # image shape: (45, 45), as reported in #1172
    image = cp.asarray(test_img[65:165, 215:315])  # just the face
    image_conv = ndi.convolve(image, psf)
    deconv_sup = restoration.wiener(image_conv, psf, 1)
    deconv_un = restoration.unsupervised_wiener(image_conv, psf)[0]
    # test the shape
    assert image.shape == deconv_sup.shape
    assert image.shape == deconv_un.shape
    # test the reconstruction error
    sup_relative_error = cp.abs(deconv_sup - image) / image
    un_relative_error = cp.abs(deconv_un - image) / image
    cp.testing.assert_array_less(cp.median(sup_relative_error), 0.1)
    cp.testing.assert_array_less(cp.median(un_relative_error), 0.1)
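For reference, wiener takes an explicit regularization (balance) argument, while unsupervised_wiener estimates it from the data and returns a (deconvolved, extra) tuple, which is why the test indexes [0]. A minimal CPU sketch of the same pipeline with scikit-image, assuming its standard API:

import numpy as np
from scipy import ndimage as ndi
from skimage import data, restoration

image = data.camera()[:64, :64] / 255.0
psf = np.zeros((5, 5))
psf[2, 2] = 1.0
psf = ndi.gaussian_filter(psf, sigma=1.0)
blurred = ndi.convolve(image, psf)
deconv, _ = restoration.unsupervised_wiener(blurred, psf)
assert deconv.shape == image.shape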
Example #4
# Nested helper extracted from its enclosing function: the find_min,
# find_max, find_median flags and their *_positions variants are
# closure variables, not arguments.
def single_group(vals, positions):
    result = []
    if find_min:
        result += [vals.min()]
    if find_min_positions:
        result += [positions[vals == vals.min()][0]]
    if find_max:
        result += [vals.max()]
    if find_max_positions:
        result += [positions[vals == vals.max()][0]]
    if find_median:
        result += [cupy.median(vals)]
    return result
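Because of those closure variables, the helper above is not runnable on its own. A self-contained sketch of the same per-group reduction, with the flags promoted to explicit parameters (the group_stats name and signature are illustrative, not from the source):

import cupy

def group_stats(vals, positions, find_min=True, find_max=True,
                find_median=True):
    # Collect the selected order statistics for one group of values.
    result = []
    if find_min:
        result.append(vals.min())
    if find_max:
        result.append(vals.max())
    if find_median:
        result.append(cupy.median(vals))
    return result

# group_stats(cupy.asarray([3., 1., 2.]), cupy.arange(3))
# -> [array(1.), array(3.), array(2.)]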
Example #5
def _mad(x, **kwargs):
    """Return the mean absolute deviation around the median."""
    return cp.mean(cp.abs(x - cp.median(x, **kwargs)), **kwargs)
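A quick usage sketch (assuming cp is cupy, as in the other examples here):

import cupy as cp

x = cp.asarray([1., 2., 2., 3., 10.])
# median is 2, absolute deviations are [1, 0, 0, 1, 8], so the MAD is 2.0
print(_mad(x))  # 2.0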
Example #6
def median_intensity(regionmask, intensity_image):
    return cp.median(intensity_image[regionmask])
Example #7
    def calculate_correlations(record: OrderedDict,
                               averaging_method: str) -> tuple:
        """
        Calculates the auto- and cross-correlations for main and interferometer arrays given the bfiq data in record.

        Parameters
        ----------
        record: OrderedDict
            hdf5 record containing bfiq data and metadata
        averaging_method: str
            Averaging method. Supported types are 'mean' and 'median'

        Returns
        -------
        main_acfs: np.array
            Autocorrelation of the main array data
        intf_acfs: np.array
            Autocorrelation of the interferometer array data
        xcfs: np.array
            Cross-correlation of the main and interferometer arrays
        """
        # TODO: Figure out how to remove pulse offsets
        pulse_phase_offset = record['pulse_phase_offset']

        # bfiq data shape  = [num_arrays, num_sequences, num_beams, num_samps]
        bfiq_data = record['data']

        # Get the data and reshape
        num_arrays, num_sequences, num_beams, num_samps = record[
            'data_dimensions']
        bfiq_data = bfiq_data.reshape(record['data_dimensions'])

        num_lags = len(record['lags'])
        main_corrs_unavg = xp.zeros(
            (num_sequences, num_beams, record['num_ranges'], num_lags),
            dtype=xp.complex64)
        intf_corrs_unavg = xp.zeros(
            (num_sequences, num_beams, record['num_ranges'], num_lags),
            dtype=xp.complex64)
        cross_corrs_unavg = xp.zeros(
            (num_sequences, num_beams, record['num_ranges'], num_lags),
            dtype=xp.complex64)

        # Loop through every sequence and compute correlations.
        # Output shape after loop is [num_sequences, num_beams, num_range_gates, num_lags]
        for sequence in range(num_sequences):
            # data input shape  = [num_antenna_arrays, num_beams, num_samps]
            # data return shape = [num_beams, num_range_gates, num_lags]
            main_corrs_unavg[sequence, ...] = \
                ProcessBfiq2Rawacf.correlations_from_samples(bfiq_data[0, sequence, :, :],
                                                             bfiq_data[0, sequence, :, :],
                                                             record)
            intf_corrs_unavg[sequence, ...] = \
                ProcessBfiq2Rawacf.correlations_from_samples(bfiq_data[1, sequence, :, :],
                                                             bfiq_data[1, sequence, :, :],
                                                             record)
            cross_corrs_unavg[sequence, ...] = \
                ProcessBfiq2Rawacf.correlations_from_samples(bfiq_data[1, sequence, :, :],
                                                             bfiq_data[0, sequence, :, :],
                                                             record)

        if averaging_method == 'median':
            main_corrs = xp.median(
                xp.real(main_corrs_unavg),
                axis=0) + 1j * xp.median(xp.imag(main_corrs_unavg), axis=0)
            intf_corrs = xp.median(
                xp.real(intf_corrs_unavg),
                axis=0) + 1j * xp.median(xp.imag(intf_corrs_unavg), axis=0)
            cross_corrs = xp.median(
                xp.real(cross_corrs_unavg),
                axis=0) + 1j * xp.median(xp.imag(cross_corrs_unavg), axis=0)
        else:
            # Using mean averaging
            main_corrs = xp.einsum('ijkl->jkl',
                                   main_corrs_unavg) / num_sequences
            intf_corrs = xp.einsum('ijkl->jkl',
                                   intf_corrs_unavg) / num_sequences
            cross_corrs = xp.einsum('ijkl->jkl',
                                    cross_corrs_unavg) / num_sequences

        main_acfs = main_corrs.flatten()
        intf_acfs = intf_corrs.flatten()
        xcfs = cross_corrs.flatten()

        return main_acfs, intf_acfs, xcfs
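Note that the 'median' branch averages the real and imaginary parts separately: the median of a set of complex numbers has no standard definition, and the componentwise result need not be an element of the input. A minimal illustration (assuming xp is numpy or cupy):

import numpy as xp

z = xp.array([1 + 5j, 2 + 6j, 3 + 4j])
med = xp.median(xp.real(z)) + 1j * xp.median(xp.imag(z))
print(med)  # (2+5j): a componentwise median, not an element of z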
Example #8
def phasesymmono(img, nscale=5, minWaveLength=3, mult=2.1, sigmaOnf=0.55, k=2.,
                 polarity=0, noiseMethod=-1):
    """
    This function calculates the phase symmetry of points in an image. This is
    a contrast invariant measure of symmetry. This function can be used as a
    line and blob detector. The greyscale 'polarity' of the lines that you want
    to find can be specified.

    Arguments:
    -----------
    <Name>      <Default>   <Description>
    img             N/A     The input image
    nscale          5       Number of wavelet scales, try values 3-6
    minWaveLength   3       Wavelength of smallest scale filter.
    mult            2.1     Scaling factor between successive filters.
    sigmaOnf        0.55    Ratio of the standard deviation of the Gaussian
                            describing the log Gabor filter's transfer function
                            in the frequency domain to the filter center
                            frequency.
    k               2.0     No. of standard deviations of the noise energy
                            beyond the mean at which we set the noise threshold
                            point. You may want to vary this up to a value of
                            10 or 20 for noisy images.
    polarity        0       Controls 'polarity' of symmetry features to find.
                            1 only return 'bright' features
                            -1 only return 'dark' features
                            0 return both 'bright' and 'dark' features
    noiseMethod     -1      Parameter specifies method used to determine
                            noise statistics.
                            -1 use median of smallest scale filter responses
                            -2 use mode of smallest scale filter responses
                            >=0 use this value as the fixed noise threshold

    Returns:
    ---------
    phaseSym        Phase symmetry image (values between 0 and 1).
    totalEnergy     Un-normalised raw symmetry energy which may be more to your
                    liking.
    T               Calculated noise threshold (can be useful for diagnosing
                    noise characteristics of images). Once you know this you
                    can then specify fixed thresholds and save some computation
                    time.

    The convolutions are done via the FFT. Many of the parameters relate to the
    specification of the filters in the frequency plane. The values do not seem
    to be very critical and the defaults are usually fine. You may want to
    experiment with the values of 'nscale' and 'k', the noise compensation
    factor.

    Notes on filter settings to obtain even coverage of the spectrum
    sigmaOnf    .85   mult 1.3
    sigmaOnf    .75   mult 1.6  (filter bandwidth ~1 octave)
    sigmaOnf    .65   mult 2.1
    sigmaOnf    .55   mult 3    (filter bandwidth ~2 octaves)

    For maximum speed the input image should have dimensions that correspond to
    powers of 2, but the code will operate on images of arbitrary size.

    See also:   phasesym, which uses oriented filters and is therefore
                slower, but also returns an orientation map of the image

    References:
    ------------
    Peter Kovesi, "Symmetry and Asymmetry From Local Phase" AI'97, Tenth
    Australian Joint Conference on Artificial Intelligence. 2 - 4 December
    1997. http://www.cs.uwa.edu.au/pub/robvis/papers/pk/ai97.ps.gz.

    Peter Kovesi, "Image Features From Phase Congruency". Videre: A Journal of
    Computer Vision Research. MIT Press. Volume 1, Number 3, Summer 1999
    http://mitpress.mit.edu/e-journals/Videre/001/v13.html

    Michael Felsberg and Gerald Sommer, "A New Extension of Linear Signal
    Processing for Estimating Local Properties and Detecting Features". DAGM
    Symposium 2000, Kiel

    Michael Felsberg and Gerald Sommer. "The Monogenic Signal" IEEE
    Transactions on Signal Processing, 49(12):3136-3144, December 2001

    """

    if img.dtype not in ['float32', 'float64']:
        img = np.float64(img)
        imgdtype = 'float64'
    else:
        imgdtype = img.dtype

    if img.ndim == 3:
        img = img.mean(2)
    rows, cols = img.shape

    epsilon = 1E-4  # used to prevent /0.
    IM = fft2(img)  # Fourier transformed image

    zeromat = np.zeros((rows, cols), dtype=imgdtype)

    # Matrix for accumulating weighted phase congruency values (energy).
    totalEnergy = zeromat.copy()

    # Matrix for accumulating filter response amplitude values.
    sumAn = zeromat.copy()

    radius, u1, u2 = filtergrid(rows, cols)

    # Get rid of the 0 radius value at the 0 frequency point (at top-left
    # corner after fftshift) so that taking the log of the radius will not
    # cause trouble.
    radius[0, 0] = 1.

    # Construct the monogenic filters in the frequency domain. The two
    # filters would normally be constructed as follows
    #    H1 = i*u1./radius
    #    H2 = i*u2./radius
    # However the two filters can be packed together as a complex valued
    # matrix, one in the real part and one in the imaginary part. Do this by
    # multiplying H2 by i and then adding it to H1 (note the subtraction
    # because i*i = -1). When the convolution is performed via the fft the real
    # part of the result will correspond to the convolution with H1 and the
    # imaginary part with H2. This allows the two convolutions to be done as
    # one in the frequency domain, saving time and memory.
    H = (1j * u1 - u2) / radius

    # The two monogenic filters H1 and H2 are not selective in terms of the
    # magnitudes of the frequencies. The code below generates bandpass log-
    # Gabor filters which are point-wise multiplied by IM to produce different
    # bandpass versions of the image before being convolved with H1 and H2
    #
    # First construct a low-pass filter that is as large as possible, yet falls
    # away to zero at the boundaries. All filters are multiplied by this to
    # ensure no extra frequencies at the 'corners' of the FFT are incorporated
    # as this can upset the normalisation process when calculating phase
    # congruency
    lp = _lowpassfilter([rows, cols], .4, 10)
    # Radius .4, 'sharpness' 10
    logGaborDenom = 2. * np.log(sigmaOnf) ** 2.

    for ss in range(nscale):
        wavelength = minWaveLength * mult ** ss
        fo = 1. / wavelength  # Centre frequency of filter

        logRadOverFo = np.log(radius / fo)
        logGabor = np.exp(-(logRadOverFo * logRadOverFo) / logGaborDenom)
        logGabor *= lp      # Apply the low-pass filter
        logGabor[0, 0] = 0.  # Undo the radius fudge

        IMF = IM * logGabor   # Frequency bandpassed image
        f = np.real(ifft2(IMF))  # Spatially bandpassed image

        # Bandpassed monogenic filtering, real part of h contains convolution
        # result with h1, imaginary part contains convolution result with h2.
        h = ifft2(IMF * H)

        # Squared amplitude of the h1 and h2 filters
        hAmp2 = h.real * h.real + h.imag * h.imag

        # Magnitude of energy
        sumAn += np.sqrt(f * f + hAmp2)

        # At the smallest scale estimate noise characteristics from the
        # distribution of the filter amplitude responses stored in sumAn. tau
        # is the Rayleigh parameter that is used to describe the distribution.
        if ss == 0:
            # Use median to estimate noise statistics
            if noiseMethod == -1:
                tau = np.median(sumAn.flatten()) / np.sqrt(np.log(4))

            # Use the mode to estimate noise statistics
            elif noiseMethod == -2:
                tau = _rayleighmode(sumAn.flatten())

        # Calculate the phase symmetry measure

        # look for 'white' and 'black' spots
        if polarity == 0:
            totalEnergy += np.abs(f) - np.sqrt(hAmp2)

        # just look for 'white' spots
        elif polarity == 1:
            totalEnergy += f - np.sqrt(hAmp2)

        # just look for 'black' spots
        elif polarity == -1:
            totalEnergy += -f - np.sqrt(hAmp2)

    # Automatically determine noise threshold

    # Assuming the noise is Gaussian the response of the filters to noise will
    # form Rayleigh distribution. We use the filter responses at the smallest
    # scale as a guide to the underlying noise level because the smallest scale
    # filters spend most of their time responding to noise, and only
    # occasionally responding to features. Either the median, or the mode, of
    # the distribution of filter responses can be used as a robust statistic to
    # estimate the distribution mean and standard deviation as these are
    # related to the median or mode by fixed constants. The response of the
    # larger scale filters to noise can then be estimated from the smallest
    # scale filter response according to their relative bandwidths.

    # This code assumes that the expected response to noise on the phase
    # congruency calculation is simply the sum of the expected noise responses
    # of each of the filters. This is a simplistic overestimate, however these
    # two quantities should be related by some constant that will depend on the
    # filter bank being used. Appropriate tuning of the parameter 'k' will
    # allow you to produce the desired output.

    # fixed noise threshold
    if noiseMethod >= 0:
        T = noiseMethod

    # Estimate the effect of noise on the sum of the filter responses as the
    # sum of estimated individual responses (this is a simplistic
    # overestimate). As the estimated noise response at successive scales is
    # scaled inversely proportional to bandwidth we have a simple geometric
    # sum.
    else:
        totalTau = tau * (1. - (1. / mult) ** nscale) / (1. - (1. / mult))

        # Calculate mean and std dev from tau using fixed relationship
        # between these parameters and tau. See
        # <http://mathworld.wolfram.com/RayleighDistribution.html>
        EstNoiseEnergyMean = totalTau * np.sqrt(np.pi / 2.)
        EstNoiseEnergySigma = totalTau * np.sqrt((4 - np.pi) / 2.)

        # Noise threshold, must be >= epsilon
        T = np.maximum(EstNoiseEnergyMean + k * EstNoiseEnergySigma,
                       epsilon)

    # Apply noise threshold - effectively wavelet denoising soft thresholding
    # and normalize symmetryEnergy by the sumAn to obtain phase symmetry. Note
    # the flooring operation is not necessary if you are after speed, it is
    # just 'tidy' not having -ve symmetry values
    phaseSym = np.maximum(totalEnergy - T, 0)
    phaseSym /= sumAn + epsilon

    return phaseSym, totalEnergy, T
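A minimal usage sketch for the function above (it depends on fft2/ifft2 and the filtergrid, _lowpassfilter and _rayleighmode helpers, which are assumed to be defined or importable alongside it):

import numpy as np

# Synthetic image: a bright horizontal ridge on a dark background.
img = np.zeros((128, 128))
img[60:68, :] = 1.0

phaseSym, totalEnergy, T = phasesymmono(img, nscale=4, polarity=1)
assert phaseSym.shape == img.shape   # symmetry values lie in [0, 1]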
Example #9
def filter_experiment_local(in_recording,
                            stim_recording,
                            low_cutoff,
                            high_cutoff,
                            order=3,
                            cmr=False,
                            sample_chunk_size=65536,
                            n_samples=-1,
                            ram_copy=False,
                            whiten=False):
    channels = stim_recording.channels
    amps = stim_recording.amps
    scales = 1000 / amps
    n_channels = stim_recording.channels.shape[0]
    #     Optionally save file into a tmpfs partition for processing
    if ram_copy:
        in_ramfile = RamFile(in_recording.filepath, 'r')
        in_filepath = in_ramfile.ram_filepath
        out_ramfile = RamFile(in_recording.filtered_filepath, 'w')
        out_filepath = out_ramfile.ram_filepath
    else:
        in_filepath = in_recording.filepath
        out_filepath = in_recording.filtered_filepath
    in_fid = h5py.File(in_filepath, 'r')
    #     Create output file
    out_fid = h5py.File(out_filepath, 'w')
    if n_samples == -1:
        n_samples = in_fid['sig'].shape[1]
    out_mapping = in_fid['mapping'][stim_recording.connected_in_mapping]
    for i, m in enumerate(out_mapping):
        m[0] = i
    out_fid['mapping'] = out_mapping
    in_fid.copy('/message_0', out_fid)
    in_fid.copy('/proc0', out_fid)
    in_fid.copy('/settings', out_fid)
    in_fid.copy('/time', out_fid)
    in_fid.copy('/version', out_fid)
    if 'bits' in in_fid.keys():
        in_fid.copy('/bits', out_fid)
    out_fid.create_dataset("sig", (n_channels, n_samples), dtype='float32')
    #     Create filter: butter expects cutoffs normalized by the Nyquist
    #     frequency (0.5 * fs = 10 kHz)
    sos = butter(order, [low_cutoff / 10000, high_cutoff / 10000],
                 'bandpass',
                 output='sos')
    #     Create chunks
    n_sample_chunks = int(np.ceil(n_samples / sample_chunk_size))
    sample_chunks = np.hstack(
        (np.arange(n_sample_chunks, dtype=int) * sample_chunk_size, n_samples))
    out_fid.create_dataset('saturations', (n_channels, len(sample_chunks) - 1),
                           dtype='int32')
    out_fid.create_dataset('first_frame',
                           shape=(1, ),
                           data=in_fid["sig"][1027, 0] << 16
                           | in_fid["sig"][1026, 0])
    overlap = sample_chunk_size
    chunk = np.zeros((n_channels, sample_chunk_size + overlap))
    chunk[:, :overlap] = np.array([
        in_fid['sig'][channels, 0],
    ] * overlap).transpose()
    for i in trange(len(sample_chunks) - 1, ncols=100, position=0, leave=True):
        idx_from = sample_chunks[i]
        idx_to = sample_chunks[i + 1]
        chunk = chunk[:, :(idx_to - idx_from + overlap)]
        chunk[:, overlap:] = in_fid['sig'][channels, idx_from:idx_to]
        out_fid['saturations'][:, i] = np.count_nonzero(
            ((0 == chunk[:, overlap:]) | (chunk[:, overlap:] == 4095)), axis=1)
        cusig = cupy.asarray(chunk, dtype=cupy.float32)
        cusig = cusig - cupy.mean(cusig)
        cusig = cusignal.sosfilt(sos, cusig)
        cusig = cupy.fliplr(cusig)
        cusig = cusignal.sosfilt(sos, cusig)
        cusig = cupy.fliplr(cusig)
        cusig = cusig * cupy.asarray(scales, dtype=cupy.float32)[:, None]
        if cmr:
            cusig = cusig - cupy.median(cusig, axis=0)
        out_fid["sig"][:, idx_from:idx_to] = cupy.asnumpy(cusig[:, overlap:])
        chunk[:, :overlap] = chunk[:, -overlap:]


    # Writing filtered traces to disk...
    in_fid.close()
    out_fid.close()
    if ram_copy:
        in_ramfile.save()
        out_ramfile.save()
        del in_ramfile, out_ramfile
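The sosfilt / fliplr / sosfilt / fliplr sequence above is a forward-backward (zero-phase) filter: running the IIR filter once in each direction cancels the phase distortion, similar in spirit to scipy.signal.sosfiltfilt but without its edge padding. A small CPU sketch of the same trick:

import numpy as np
from scipy.signal import butter, sosfilt

# Band-pass between 300 Hz and 6 kHz at fs = 20 kHz (Nyquist = 10 kHz).
sos = butter(3, [300 / 10000, 6000 / 10000], 'bandpass', output='sos')
x = np.random.randn(4, 2048)                    # 4 channels of noise
y = sosfilt(sos, x, axis=-1)                    # forward pass
y = sosfilt(sos, y[:, ::-1], axis=-1)[:, ::-1]  # backward pass
# y is band-passed with (approximately) zero phase shift relative to x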
Example #10
def dask_filter_chunk(in_rec_filepath,
                      channels,
                      idx_from,
                      idx_to,
                      scales,
                      low_cutoff,
                      high_cutoff,
                      order=3,
                      cmr=True,
                      whiten=True,
                      h5write=None):
    sos = butter(order, [low_cutoff / 10000, high_cutoff / 10000],
                 'bandpass',
                 output='sos')
    file = h5py.File(in_rec_filepath, 'r')
    sig = file['sig']
    chunk_size = idx_to - idx_from
    if idx_to > sig.shape[1]:
        idx_to = sig.shape[1]
        idx_from = idx_to - chunk_size
    if idx_from == 0:
        chunk = np.ones(
            (len(channels), chunk_size * 2)) * sig[channels, 0][:, np.newaxis]
        chunk[:, chunk_size:] = sig[channels, :idx_to]
    else:
        chunk = sig[channels, idx_from - chunk_size:idx_to]
    saturations = np.count_nonzero(
        ((0 == chunk[:, chunk_size:]) | (chunk[:, chunk_size:] == 4095)),
        axis=1)
    file.close()
    cusig = cupy.asarray(chunk, dtype=cupy.float32)
    cusig = cusig - cupy.mean(cusig)
    cusig = cusignal.sosfilt(sos, cusig)
    cusig = cupy.fliplr(cusig)
    cusig = cusignal.sosfilt(sos, cusig)
    cusig = cupy.fliplr(cusig)
    cusig = cusig * cupy.asarray(scales, dtype=cupy.float32)[:, None]
    if cmr:
        cusig = cusig - cupy.median(cusig, axis=0)
    cusig = cusig.get()
    if whiten:
        U, S, Vt = np.linalg.svd(cusig, full_matrices=False)
        w_chunk = np.dot(U, Vt)
        if h5write is not None:
            written = False
            while not written:
                try:
                    out_fid = h5py.File(h5write, 'r+')
                    out_fid['sig'][:, idx_from:idx_to] = cusig[:, chunk_size:]
                    out_fid['white_sig'][:, idx_from:idx_to] = \
                        w_chunk[:, chunk_size:]
                    out_fid.close()
                    written = True
                except OSError:  # file locked by another worker; retry
                    time.sleep(0.05)
            return saturations
        else:
            return cusig, w_chunk, saturations
    else:
        if h5write is not None:
            written = False
            while not written:
                try:
                    out_fid = h5py.File(h5write, 'r+')
                    # Keep only the filtered samples (the second half of the
                    # padded chunk), as in the whiten branch above.
                    out_fid['sig'][:, idx_from:idx_to] = cusig[:, chunk_size:]
                    out_fid.close()
                    written = True
                except OSError:  # file locked by another worker; retry
                    time.sleep(0.05)
            return saturations
        else:
            return cusig, saturations
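The whitening step keeps only the orthogonal factor of the SVD: U @ Vt discards the singular values, so the rows of w_chunk span the same subspace as the input but with unit scale and no cross-channel correlation. A tiny demonstration:

import numpy as np

x = np.random.randn(4, 1000)
x[1] += 0.9 * x[0]                          # introduce channel correlation
U, S, Vt = np.linalg.svd(x, full_matrices=False)
w = U @ Vt                                  # same subspace, singular values -> 1
print(np.allclose(w @ w.T, np.eye(4)))      # True: rows are orthonormal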
Example #11
def time_odd_small(self):
    np.median(self.o[:500], overwrite_input=True)
Example #12
def time_even_small(self):
    np.median(self.e[:500], overwrite_input=True)
Example #13
def time_odd_inplace(self):
    np.median(self.o, overwrite_input=True)
Example #14
def time_even_inplace(self):
    np.median(self.e, overwrite_input=True)
Example #15
def time_odd(self):
    np.median(self.o)
Example #16
def time_even(self):
    np.median(self.e)
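These benchmarks pass overwrite_input=True, which lets np.median partially sort its input buffer in place instead of working on a copy; the result is unchanged, but the input's element order is undefined afterwards. A short illustration:

import numpy as np

a = np.random.randn(10001)
b = a.copy()
m = np.median(b, overwrite_input=True)
print(m == np.median(a))       # True: same median
print(np.array_equal(a, b))    # likely False: b was partially sorted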