Example #1
    def compute(self):
        t = time.time()
        signal = self.getInputFromPort("Signal").get_array().squeeze()
        print "signal.shape = ", signal.shape
        f = scipy.fftpack.fft(signal)
        print "got f made"
        f = scipy.fftpack.hilbert(f)
        print "got hilbert done"
        #        f2 = numpy.concatenate((f,f))

        lof = self.getInputFromPort("Low Freq")
        hif = self.getInputFromPort("Hi Freq")

        out_ar = numpy.zeros((hif - lof + 1, signal.shape[0]))

        start = 0
        if lof == 0:
            out_ar[0, :] = signal.mean()
            start = 1

        for k in range(start, hif - lof, 1):
            g = self.get_gaussian(signal.shape[0], lof + k)
            o = scipy.fftpack.ifft(numpy.roll(f, lof + k) * g) / float(
                signal.size)
            out_ar[k, :] = o

        print "time = ", (time.time() - t) * 1000.
        out = NDArray()
        out.set_array(out_ar)
        self.setResult("Output", out)
Example #2
    def compute(self):
        t = time.time()
        signal = self.get_input("Signal").get_array().squeeze()
        print "signal.shape = ", signal.shape
        f = scipy.fftpack.fft(signal)
        print "got f made"
        f = scipy.fftpack.hilbert(f)
        print "got hilbert done"
#        f2 = numpy.concatenate((f,f))

        lof = self.get_input("Low Freq")
        hif = self.get_input("Hi Freq")

        out_ar = numpy.zeros((hif-lof+1, signal.shape[0]))

        start = 0
        if lof == 0:
            out_ar[0,:] = signal.mean()
            start = 1

        for k in range(start, hif-lof, 1):
            g = self.get_gaussian(signal.shape[0], lof+k)
            o = scipy.fftpack.ifft(numpy.roll(f,lof+k) * g) / float(signal.size)
            out_ar[k,:] = o

        print "time = ", (time.time() - t) * 1000.
        out = NDArray()
        out.set_array(out_ar)
        self.set_output("Output", out)
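For reference, the core of Examples #1 and #2 (shift the Hilbert-transformed spectrum by each frequency offset, apply a Gaussian window, and inverse-transform) can be sketched outside the VisTrails module API. This is a minimal, hypothetical sketch, not the original module: the Gaussian window stands in for the module's get_gaussian() helper, and its width sigma is an assumed parameter.
import numpy
import scipy.fftpack

def band_decompose(signal, lof, hif, sigma=16.0):
    # Hypothetical standalone version of the compute() bodies above.
    n = signal.shape[0]
    f = scipy.fftpack.hilbert(scipy.fftpack.fft(signal))
    out_ar = numpy.zeros((hif - lof + 1, n))
    centred = numpy.arange(n) - n // 2
    for k in range(hif - lof + 1):
        g = numpy.exp(-0.5 * (centred / sigma) ** 2)  # stand-in for get_gaussian()
        out_ar[k, :] = numpy.real(
            scipy.fftpack.ifft(numpy.roll(f, lof + k) * g)) / float(n)
    return out_ar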
Example #3
def stFeatureExtraction(signal, Fs, Win, Step):
    """
    This function implements the short-term windowing process. For each short-term window a set of features is extracted.
    This results in a sequence of feature vectors, stored in a numpy matrix.

    ARGUMENTS
        signal:       the input signal samples
        Fs:           the sampling freq (in Hz)
        Win:          the short-term window size (in samples)
        Step:         the short-term window step (in samples)
    RETURNS
        stFeatures:   a numpy array (numOfFeatures x numOfShortTermWindows)
    """

    Win = int(Win)
    Step = int(Step)

    # Signal normalization
    signal = numpy.double(signal)

    signal = signal / (2.0 ** 15)
    DC = signal.mean()
    MAX = (numpy.abs(signal)).max()
    signal = (signal - DC) / MAX

    N = len(signal)                                # total number of samples
    curPos = 0
    countFrames = 0
    nFFT = Win // 2

    numOfTimeSpectralFeatures = 8
    totalNumOfFeatures = numOfTimeSpectralFeatures 

    stFeatures = []
    while (curPos + Win - 1 < N):                        # for each short-term window until the end of signal
        countFrames += 1
        x = signal[curPos:curPos+Win]                    # get current window
        curPos = curPos + Step                           # update window position
        X = abs(fft(x))                                  # get fft magnitude
        X = X[0:nFFT]                                    # normalize fft
        X = X / len(X)
        if countFrames == 1:
            Xprev = X.copy()                             # keep previous fft mag (used in spectral flux)
        curFV = numpy.zeros((totalNumOfFeatures, 1))
        curFV[0] = stZCR(x)                              # zero crossing rate
        curFV[1] = stEnergy(x)                           # short-term energy
        curFV[2] = stEnergyEntropy(x)                    # short-term entropy of energy
        [curFV[3], curFV[4]] = stSpectralCentroidAndSpread(X, Fs)    # spectral centroid and spread
        curFV[5] = stSpectralEntropy(X)                  # spectral entropy
        curFV[6] = stSpectralFlux(X, Xprev)              # spectral flux
        curFV[7] = stSpectralRollOff(X, 0.90, Fs)        # spectral rolloff
        stFeatures.append(curFV)
        Xprev = X.copy()

    stFeatures = numpy.concatenate(stFeatures, 1)
    return stFeatures
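A hypothetical usage sketch for the function above, assuming the helper functions it calls (stZCR, stEnergy, stEnergyEntropy, stSpectralCentroidAndSpread, stSpectralEntropy, stSpectralFlux, stSpectralRollOff) and fft are imported from the same module:
import numpy
Fs = 16000
x = (numpy.random.randn(2 * Fs) * (2 ** 14)).astype(numpy.int16)  # two seconds of noise as a stand-in
feats = stFeatureExtraction(x, Fs, Win=int(0.050 * Fs), Step=int(0.025 * Fs))
print(feats.shape)  # (8, number_of_short_term_windows)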
Example #4
def short_term_mspec(signal,
                     flen=0.025,
                     frate=0.01,
                     preemph=0.97,
                     srate=16000,
                     window=np.hamming):
    '''Short term magnitude spectrum.

    Args:
        signal (numpy.ndarray): The raw audio signal.
        flen (float): Frame duration in seconds.
        frate (float): Frame shift (time between successive frames) in seconds.
        preemph (float): Pre-emphasis coefficient.
        srate (int): Expected sampling rate of the audio.
        window (function): Windowing function (default: hamming).

    Returns:
        mspec (``numpy.ndarray``): Magnitude spectrum.
        fft_len (int): Length of the FFT used.

    '''
    # Normalize the dynamic range of the signal.
    try:
        max_val = np.iinfo(signal.dtype).max
    except ValueError:
        max_val = np.finfo(signal.dtype).max
    signal = signal / max_val

    # Remove DC offset.
    signal -= signal.mean()

    # Convert the frame rate/length from second to number of samples.
    frate_samp = int(srate * frate)
    flen_samp = int(srate * flen)

    # Compute the number of frames.
    nframes = (len(signal) - flen_samp) // frate_samp + 1

    # Pre-emphasis filtering.
    s_t = np.array(signal, dtype=np.float32)
    s_t -= preemph * np.r_[s_t[0], s_t[:-1]]

    # Extract the overlapping frames.
    isize = s_t.dtype.itemsize
    sframes = np.lib.stride_tricks.as_strided(s_t,
                                              shape=(nframes, flen_samp),
                                              strides=(frate_samp * isize,
                                                       isize),
                                              writeable=False)

    # Apply the window function.
    frames = sframes * window(flen_samp)[None, :]

    # Compute FFT.
    fft_len = int(2**np.floor(np.log2(flen_samp) + 1))
    return np.abs(np.fft.rfft(frames, n=fft_len, axis=-1)[:, :-1]), fft_len
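A minimal usage sketch on synthetic data (one second of int16 noise as a stand-in for real audio):
import numpy as np
audio = (np.random.randn(16000) * 2 ** 14).astype(np.int16)
mspec, fft_len = short_term_mspec(audio, srate=16000)
print(mspec.shape, fft_len)  # roughly (n_frames, fft_len // 2), 512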
Example #5
def autocorrel(signal, tmax, dt):
    """
    argument : signal (np.array), tmax and dt (float)
    tmax, is the maximum length of the autocorrelation that we want to see
    returns : autocorrel (np.array), time_shift (np.array)
    take a signal of time sampling dt, and returns its autocorrelation
     function between [0,tstop] (normalized) !!
    """
    steps = int(tmax / dt)  # number of steps to sum on
    signal = (signal - signal.mean()) / signal.std()
    cr = np.correlate(signal[steps:], signal) / steps
    time_shift = np.arange(len(cr)) * dt
    return cr / cr.max(), time_shift
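A short usage sketch, assuming numpy is imported as np as in the snippet:
dt = 0.001
t = np.arange(0, 10.0, dt)
x = np.sin(2 * np.pi * 5 * t) + np.random.randn(t.size)  # noisy 5 Hz oscillation
cr, shifts = autocorrel(x, tmax=1.0, dt=dt)
print(cr.shape, shifts[-1])  # autocorrelation normalized to a peak of 1, and its time axis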
Example #6
def burst_stats(signal, peak_order, peak_percentile, dt):
    pop_burst_peak = scipy.signal.argrelmax(signal, order=peak_order)[0]
    pop_burst_peak = pop_burst_peak[signal[pop_burst_peak] >
                                    np.percentile(signal, peak_percentile)]
    pop_burst_trough = scipy.signal.argrelmin(signal, order=peak_order)[0]
    pop_burst_trough = pop_burst_trough[signal[pop_burst_trough] <
                                        np.percentile(signal, 100. - peak_percentile)]
    ibi_vec = np.diff(pop_burst_peak) * dt / 1000.0
    ibi_mean = np.mean(ibi_vec)
    ibi_cv = np.std(ibi_vec) / ibi_mean
    ibi_irregularity = irregularity_score(ibi_vec)
    amplitude_irregularity = irregularity_score(signal[pop_burst_peak])
    amplitude_cv = np.std(signal[pop_burst_peak]) / np.mean(signal[pop_burst_peak])
    peak_to_trough = (signal[pop_burst_peak].mean() -
                      signal[pop_burst_trough].mean()) / signal.mean()
    return (pop_burst_peak, pop_burst_trough, ibi_vec, ibi_mean, ibi_cv,
            ibi_irregularity, amplitude_irregularity, amplitude_cv, peak_to_trough)
Example #7
    def forward(self, carrier, signal):

        signal = signal.mean(-1).clip(-1, 1)
        beta = int(self.beta) + 1

        if self._buffer is None:
            self._buffer = np.zeros((2 * beta, carrier.shape[1]),
                                    dtype=carrier.dtype)

        t1 = np.arange(beta, beta + len(carrier),
                       dtype='float64') + self.beta * signal
        t2 = t1.astype('int64')
        t3 = (t1 - t2.astype('float64'))[:, None]

        a0 = np.concatenate((self._buffer, carrier))
        a1 = a0[t2]
        a2 = a0[t2 + 1]
        a3 = a2 * t3 + a1 * (1 - t3)

        self._buffer = a0[-2 * beta:]

        return a3
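The forward() method above applies a time-varying fractional delay: the modulator shifts the read position into the carrier, samples are blended by linear interpolation, and a small buffer is carried across calls. Below is a minimal, self-contained sketch of the same interpolation for a mono carrier without the streaming buffer; the function name and zero-padding are assumptions, not part of the original class.
import numpy as np

def fractional_delay(carrier, mod, beta):
    # Read the carrier with a time-varying delay of about beta * (1 - mod[n]) + 1
    # samples, using linear interpolation. `mod` must be the same length as
    # `carrier` and lie in [-1, 1].
    mod = np.clip(mod, -1, 1)
    pad = int(beta) + 1
    padded = np.concatenate((np.zeros(2 * pad, dtype=carrier.dtype), carrier))
    t = np.arange(pad, pad + len(carrier), dtype='float64') + beta * mod
    idx = t.astype('int64')
    frac = t - idx
    return padded[idx + 1] * frac + padded[idx] * (1 - frac)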
Example #8
def rauc(signal, baseline=None, bin_duration=None, t_start=None, t_stop=None):
    '''
    Calculate the rectified area under the curve (RAUC) for an AnalogSignal.

    The signal is optionally divided into bins with duration `bin_duration`,
    and the rectified signal (absolute value) is integrated within each bin to
    find the area under the curve. The mean or median of the signal or an
    arbitrary baseline may optionally be subtracted before rectification. If
    the number of bins is 1 (default), a single value is returned for each
    channel in the input signal. Otherwise, an AnalogSignal containing the
    values for each bin is returned along with the times of the centers of the
    bins.

    Parameters
    ----------
    signal : neo.AnalogSignal
        The signal to integrate. If `signal` contains more than one channel,
        each is integrated separately.
    bin_duration : quantities.Quantity
        The length of time that each integration should span. If None, there
        will be only one bin spanning the entire signal duration. If
        `bin_duration` does not divide evenly into the signal duration, the end
        of the signal is padded with zeros to accommodate the final,
        overextending bin.
        Default: None
    baseline : string or quantities.Quantity
        A factor to subtract from the signal before rectification. If `'mean'`
        or `'median'`, the mean or median value of the entire signal is
        subtracted on a channel-by-channel basis.
        Default: None
    t_start, t_stop : quantities.Quantity
        Times to start and end the algorithm. The signal is cropped using
        `signal.time_slice(t_start, t_stop)` after baseline removal. Useful if
        you want the RAUC for a short section of the signal but want the
        mean or median calculation (`baseline='mean'` or `baseline='median'`)
        to use the entire signal for better baseline estimation.
        Default: None

    Returns
    -------
    quantities.Quantity or neo.AnalogSignal
        If the number of bins is 1, the returned object is a scalar or
        vector Quantity containing a single RAUC value for each channel.
        Otherwise, the returned object is an AnalogSignal containing the
        RAUC(s) for each bin stored as a sample, with times corresponding to
        the center of each bin. The output signal will have the same number
        of channels as the input signal.

    Raises
    ------
    TypeError
        If the input signal is not a neo.AnalogSignal.
    TypeError
        If `bin_duration` is not None or a Quantity.
    TypeError
        If `baseline` is not None, `'mean'`, `'median'`, or a Quantity.
    '''

    if not isinstance(signal, neo.AnalogSignal):
        raise TypeError('Input signal is not a neo.AnalogSignal!')

    if baseline is None:
        pass
    elif baseline == 'mean':
        # subtract mean from each channel
        signal = signal - signal.mean(axis=0)
    elif baseline == 'median':
        # subtract median from each channel
        signal = signal - np.median(signal.as_quantity(), axis=0)
    elif isinstance(baseline, pq.Quantity):
        # subtract arbitrary baseline
        signal = signal - baseline
    else:
        raise TypeError('baseline must be None, \'mean\', \'median\', '
                        'or a Quantity: {}'.format(baseline))

    # slice the signal after subtracting baseline
    signal = signal.time_slice(t_start, t_stop)

    if bin_duration is not None:
        # from bin duration, determine samples per bin and number of bins
        if isinstance(bin_duration, pq.Quantity):
            samples_per_bin = int(
                np.round(
                    bin_duration.rescale('s') /
                    signal.sampling_period.rescale('s')))
            n_bins = int(np.ceil(signal.shape[0] / samples_per_bin))
        else:
            raise TypeError(
                'bin_duration must be a Quantity: {}'.format(bin_duration))
    else:
        # all samples in one bin
        samples_per_bin = signal.shape[0]
        n_bins = 1

    # store the actual bin duration
    bin_duration = samples_per_bin * signal.sampling_period

    # reshape into equal size bins, padding the end with zeros if necessary
    n_channels = signal.shape[1]
    sig_binned = signal.as_quantity().copy()
    sig_binned.resize(n_bins * samples_per_bin, n_channels, refcheck=False)
    sig_binned = sig_binned.reshape(n_bins, samples_per_bin, n_channels)

    # rectify and integrate over each bin
    rauc = np.trapz(np.abs(sig_binned), dx=signal.sampling_period, axis=1)

    if n_bins == 1:
        # return a single value for each channel
        return rauc.squeeze()

    else:
        # return an AnalogSignal with times corresponding to center of each bin
        rauc_sig = neo.AnalogSignal(
            rauc,
            t_start=signal.t_start.rescale(bin_duration.units) +
            bin_duration / 2,
            sampling_period=bin_duration)
        return rauc_sig
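A hedged usage sketch for the binned form of this function (one value per bin and channel); the synthetic signal and bin duration are illustrative choices, not from the original source:
import neo
import numpy as np
import quantities as pq

sig = neo.AnalogSignal(np.random.randn(1000, 2), units='mV',
                       sampling_rate=1000 * pq.Hz)
rauc_per_bin = rauc(sig, baseline='mean', bin_duration=100 * pq.ms)
print(rauc_per_bin.shape)  # (10, 2): one RAUC value per 100 ms bin and channel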
Example #9
def rauc(signal, baseline=None, bin_duration=None, t_start=None, t_stop=None):
    """
    Calculate the rectified area under the curve (RAUC) for a
    `neo.AnalogSignal`.

    The signal is optionally divided into bins with duration `bin_duration`,
    and the rectified signal (absolute value) is integrated within each bin to
    find the area under the curve. The mean or median of the signal or an
    arbitrary baseline may optionally be subtracted before rectification.

    Parameters
    ----------
    signal : neo.AnalogSignal
        The signal to integrate. If `signal` contains more than one channel,
        each is integrated separately.
    baseline : pq.Quantity or {'mean', 'median'}, optional
        A factor to subtract from the signal before rectification.
        If 'mean', the mean value of the entire `signal` is subtracted on a
        channel-by-channel basis.
        If 'median', the median value of the entire `signal` is subtracted on
        a channel-by-channel basis.
        Default: None
    bin_duration : pq.Quantity, optional
        The length of time that each integration should span.
        If None, there will be only one bin spanning the entire signal
        duration.
        If `bin_duration` does not divide evenly into the signal duration, the
        end of the signal is padded with zeros to accommodate the final,
        overextending bin.
        Default: None
    t_start : pq.Quantity, optional
        Time to start the algorithm.
        If None, starts at the beginning of `signal`.
        Default: None
    t_stop : pq.Quantity, optional
        Time to end the algorithm.
        If None, ends at the last time of `signal`.
        The signal is cropped using `signal.time_slice(t_start, t_stop)` after
        baseline removal. Useful if you want the RAUC for a short section of
        the signal but want the mean or median calculation (`baseline`='mean'
        or `baseline`='median') to use the entire signal for better baseline
        estimation.
        Default: None

    Returns
    -------
    pq.Quantity or neo.AnalogSignal
        If the number of bins is 1, the returned object is a scalar or
        vector `pq.Quantity` containing a single RAUC value for each channel.
        Otherwise, the returned object is a `neo.AnalogSignal` containing the
        RAUC(s) for each bin stored as a sample, with times corresponding to
        the center of each bin. The output signal will have the same number
        of channels as the input signal.

    Raises
    ------
    ValueError
        If `signal` is not `neo.AnalogSignal`.

        If `bin_duration` is not None or `pq.Quantity`.

        If `baseline` is not None, 'mean', 'median', or `pq.Quantity`.

    See Also
    --------
    neo.AnalogSignal.time_slice : how `t_start` and `t_stop` are used

    Examples
    --------
    >>> import neo
    >>> import numpy as np
    >>> import quantities as pq
    >>> from elephant.signal_processing import rauc
    >>> signal = neo.AnalogSignal(np.arange(10), sampling_rate=20 * pq.Hz,
    ...     units='mV')
    >>> rauc(signal)
    array(2.025) * mV/Hz

    """

    if not isinstance(signal, neo.AnalogSignal):
        raise ValueError('Input signal is not a neo.AnalogSignal!')

    if baseline is None:
        pass
    elif baseline == 'mean':
        # subtract mean from each channel
        signal = signal - signal.mean(axis=0)
    elif baseline == 'median':
        # subtract median from each channel
        signal = signal - np.median(signal.as_quantity(), axis=0)
    elif isinstance(baseline, pq.Quantity):
        # subtract arbitrary baseline
        signal = signal - baseline
    else:
        raise ValueError("baseline must be either None, 'mean', 'median', or "
                         "a Quantity. Got {}".format(baseline))

    # slice the signal after subtracting baseline
    signal = signal.time_slice(t_start, t_stop)

    if bin_duration is not None:
        # from bin duration, determine samples per bin and number of bins
        if isinstance(bin_duration, pq.Quantity):
            samples_per_bin = int(
                np.round(
                    bin_duration.rescale('s') /
                    signal.sampling_period.rescale('s')))
            n_bins = int(np.ceil(signal.shape[0] / samples_per_bin))
        else:
            raise ValueError(
                "bin_duration must be a Quantity. Got {}".format(bin_duration))
    else:
        # all samples in one bin
        samples_per_bin = signal.shape[0]
        n_bins = 1

    # store the actual bin duration
    bin_duration = samples_per_bin * signal.sampling_period

    # reshape into equal size bins, padding the end with zeros if necessary
    n_channels = signal.shape[1]
    sig_binned = signal.as_quantity().copy()
    sig_binned.resize(n_bins * samples_per_bin, n_channels, refcheck=False)
    sig_binned = sig_binned.reshape(n_bins, samples_per_bin, n_channels)

    # rectify and integrate over each bin
    rauc = np.trapz(np.abs(sig_binned), dx=signal.sampling_period, axis=1)

    if n_bins == 1:
        # return a single value for each channel
        return rauc.squeeze()

    else:
        # return an AnalogSignal with times corresponding to center of each bin
        t_start = signal.t_start.rescale(bin_duration.units) + bin_duration / 2
        rauc_sig = neo.AnalogSignal(rauc,
                                    t_start=t_start,
                                    sampling_period=bin_duration)
        return rauc_sig
Example #10
3) Convolving the series with the filter
4) Comparing the series (original, moving averages and Gaussian smoothed)
"""

filt = gaussian(31, 4)
filt /= sum(filt)
figure(6)
plot(filt)
cot_after_Gsmooth = convolve(cot_after[:, 1], filt, mode="valid")
figure(7, figsize=(14, 10))
plot(cot_after[:, 0], cot_after[:, 1], "r")
plot(cot_after[10:-10, 0], movavg(cot_after[:, 1] - 9000, 21), "g")
plot(cot_after[15:-15, 0], cot_after_Gsmooth + 9000, "b")

"""Calculating the Cross-correlation of two 1-dimensional sequences:
http://docs.scipy.org/doc/numpy/reference/generated/numpy.correlate.html    
1) Subctrating the mean
2) Adding the a vector of zeros of the same size, to run the autocorrelation
(optionally you can analyse just a part of the series
3) Normalize dividing by the values of the first element
"""

cotsubmedia = cot_after - mean(cot_after)
corr = correlate(cotsubmedia, concatenate((cotsubmedia, zeros_like(cotsubmedia))), mode="valid")
# corr = corr[:1000]
corr /= corr[0]
figure(8, figsize=(14, 10))
title("Cross Correlation Function")
plot(corr)
show()
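For reference, the same normalized autocorrelation can be obtained without the explicit zero-padding trick by keeping only the non-negative lags of a full correlation; a minimal sketch on a stand-in 1-D series:
import numpy as np
x = np.random.randn(2048)          # stand-in for cot_after[:, 1]
x = x - x.mean()
acorr = np.correlate(x, x, mode="full")[x.size - 1:]  # lags 0, 1, 2, ...
acorr = acorr / acorr[0]           # normalize so the zero-lag value is 1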
Example #11
def generate_feat_opts(path=None,
                       cfg={
                           'pkg': 'pysp',
                           'type': 'logfbank',
                           'nfilt': 40,
                           'delta': 2
                       },
                       signal=None,
                       rate=16000):
    cfg = dict(cfg)
    if cfg['pkg'] == 'pysp':  # python_speech_features #
        if signal is None:
            rate, signal = wavfile.read(path)

        if cfg['type'] == 'logfbank':
            feat_mat = pyspfeat.base.logfbank(signal,
                                              rate,
                                              nfilt=cfg.get('nfilt', 40))
        elif cfg['type'] == 'mfcc':
            feat_mat = pyspfeat.base.mfcc(signal,
                                          rate,
                                          numcep=cfg.get('nfilt', 26) // 2,
                                          nfilt=cfg.get('nfilt', 26))
        elif cfg['type'] == 'wav':
            feat_mat = pyspfeat.base.sigproc.framesig(
                signal,
                frame_len=cfg.get('frame_len', 400),
                frame_step=cfg.get('frame_step', 160))
        else:
            raise NotImplementedError(
                "feature type {} is not implemented/available".format(
                    cfg['type']))
            pass
        # delta #
        comb_feat_mat = [feat_mat]
        delta = cfg['delta']
        if delta > 0:
            delta_feat_mat = pyspfeat.base.delta(feat_mat, 2)
            comb_feat_mat.append(delta_feat_mat)
        if delta > 1:
            delta2_feat_mat = pyspfeat.base.delta(delta_feat_mat, 2)
            comb_feat_mat.append(delta2_feat_mat)
        if delta > 2:
            raise NotImplementedError(
                "max delta is 2, larger than 2 is not normal setting")
        return np.hstack(comb_feat_mat)
    elif cfg['pkg'] == 'rosa':
        if signal is None:
            signal, rate = librosa.core.load(path, sr=cfg['sample_rate'])

        assert rate == cfg[
            'sample_rate'], "sample rate is different with current data"

        if cfg.get('preemphasis', None) is not None:
            # signal = np.append(signal[0], signal[1:] - cfg['preemphasis']*signal[:-1])
            signal = signal_util.preemphasis(signal, cfg['preemphasis'])

        if cfg.get('pre', None) == 'meanstd':
            signal = (signal - signal.mean()) / signal.std()
        elif cfg.get('pre', None) == 'norm':
            signal = (signal - signal.min()) / (signal.max() -
                                                signal.min()) * 2 - 1

        # raw feature
        if cfg['type'] == 'wav':
            if cfg.get('post', None) == 'mu':
                signal = linear2mu(signal)

            feat_mat = pyspfeat.base.sigproc.framesig(
                signal,
                frame_len=cfg.get('frame_len', 400),
                frame_step=cfg.get('frame_step', 160))
            return feat_mat
        # spectrogram-based feature
        raw_spec = signal_util.rosa_spectrogram(
            signal,
            n_fft=cfg['nfft'],
            hop_length=cfg.get('winstep', None),
            win_length=cfg.get('winlen', None))[0]
        if cfg['type'] in ['logmelfbank', 'melfbank']:
            mel_spec = signal_util.rosa_spec2mel(raw_spec, nfilt=cfg['nfilt'])
            if cfg['type'] == 'logmelfbank':
                return np.log(mel_spec)
            else:
                return mel_spec
        elif cfg['type'] == 'lograwfbank':
            return np.log(raw_spec)
        elif cfg['type'] == 'rawfbank':
            return raw_spec
        else:
            raise NotImplementedError()
    elif cfg['pkg'] == 'taco':
        # SPECIAL FOR TACOTRON #
        tacohelper = TacotronHelper(cfg)
        if signal is None:
            signal = tacohelper.load_wav(path)

        assert len(signal) != 0, ('file {} is empty'.format(path))

        try:
            if cfg['type'] == 'raw':
                feat = tacohelper.spectrogram(signal).T
            elif cfg['type'] == 'mel':
                feat = tacohelper.melspectrogram(signal).T
            else:
                raise NotImplementedError()
        except:
            import ipdb
            ipdb.set_trace()
            pass
        return feat
    elif cfg['pkg'] == 'world':
        if path is None:
            with tempfile.NamedTemporaryFile() as tmpfile:
                wavfile.write(tmpfile.name, rate, signal)
                logf0, bap, mgc = world_vocoder_util.world_analysis(
                    tmpfile.name, cfg['mcep'])
        else:
            logf0, bap, mgc = world_vocoder_util.world_analysis(
                path, cfg['mcep'])

        vuv, f0, bap, mgc = world_vocoder_util.world2feat(logf0, bap, mgc)

        # ignore delta, avoid curse of dimensionality #
        return vuv, f0, bap, mgc
    else:
        raise NotImplementedError()
        pass
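A hypothetical usage sketch for the python_speech_features ('pysp') branch, assuming the imports used in the snippet (numpy as np, python_speech_features as pyspfeat) are available:
import numpy as np
rate = 16000
audio = (np.random.randn(rate) * 2 ** 14).astype(np.int16)  # one second of noise as a stand-in
feats = generate_feat_opts(signal=audio, rate=rate,
                           cfg={'pkg': 'pysp', 'type': 'logfbank',
                                'nfilt': 40, 'delta': 2})
print(feats.shape)  # (n_frames, 120): 40 log-filterbanks plus delta and delta-delta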
Example #12
def rauc(signal, baseline=None, bin_duration=None, t_start=None, t_stop=None):
    '''
    Calculate the rectified area under the curve (RAUC) for an AnalogSignal.

    The signal is optionally divided into bins with duration `bin_duration`,
    and the rectified signal (absolute value) is integrated within each bin to
    find the area under the curve. The mean or median of the signal or an
    arbitrary baseline may optionally be subtracted before rectification. If
    the number of bins is 1 (default), a single value is returned for each
    channel in the input signal. Otherwise, an AnalogSignal containing the
    values for each bin is returned along with the times of the centers of the
    bins.

    Parameters
    ----------
    signal : neo.AnalogSignal
        The signal to integrate. If `signal` contains more than one channel,
        each is integrated separately.
    bin_duration : quantities.Quantity
        The length of time that each integration should span. If None, there
        will be only one bin spanning the entire signal duration. If
        `bin_duration` does not divide evenly into the signal duration, the end
        of the signal is padded with zeros to accommodate the final,
        overextending bin.
        Default: None
    baseline : string or quantities.Quantity
        A factor to subtract from the signal before rectification. If `'mean'`
        or `'median'`, the mean or median value of the entire signal is
        subtracted on a channel-by-channel basis.
        Default: None
    t_start, t_stop : quantities.Quantity
        Times to start and end the algorithm. The signal is cropped using
        `signal.time_slice(t_start, t_stop)` after baseline removal. Useful if
        you want the RAUC for a short section of the signal but want the
        mean or median calculation (`baseline='mean'` or `baseline='median'`)
        to use the entire signal for better baseline estimation.
        Default: None

    Returns
    -------
    quantities.Quantity or neo.AnalogSignal
        If the number of bins is 1, the returned object is a scalar or
        vector Quantity containing a single RAUC value for each channel.
        Otherwise, the returned object is an AnalogSignal containing the
        RAUC(s) for each bin stored as a sample, with times corresponding to
        the center of each bin. The output signal will have the same number
        of channels as the input signal.

    Raises
    ------
    TypeError
        If the input signal is not a neo.AnalogSignal.
    TypeError
        If `bin_duration` is not None or a Quantity.
    TypeError
        If `baseline` is not None, `'mean'`, `'median'`, or a Quantity.
    '''

    if not isinstance(signal, neo.AnalogSignal):
        raise TypeError('Input signal is not a neo.AnalogSignal!')

    if baseline is None:
        pass
    elif baseline == 'mean':
        # subtract mean from each channel
        signal = signal - signal.mean(axis=0)
    elif baseline == 'median':
        # subtract median from each channel
        signal = signal - np.median(signal.as_quantity(), axis=0)
    elif isinstance(baseline, pq.Quantity):
        # subtract arbitrary baseline
        signal = signal - baseline
    else:
        raise TypeError(
            'baseline must be None, \'mean\', \'median\', '
            'or a Quantity: {}'.format(baseline))

    # slice the signal after subtracting baseline
    signal = signal.time_slice(t_start, t_stop)

    if bin_duration is not None:
        # from bin duration, determine samples per bin and number of bins
        if isinstance(bin_duration, pq.Quantity):
            samples_per_bin = int(np.round(
                bin_duration.rescale('s')/signal.sampling_period.rescale('s')))
            n_bins = int(np.ceil(signal.shape[0]/samples_per_bin))
        else:
            raise TypeError(
                'bin_duration must be a Quantity: {}'.format(bin_duration))
    else:
        # all samples in one bin
        samples_per_bin = signal.shape[0]
        n_bins = 1

    # store the actual bin duration
    bin_duration = samples_per_bin * signal.sampling_period

    # reshape into equal size bins, padding the end with zeros if necessary
    n_channels = signal.shape[1]
    sig_binned = signal.as_quantity().copy()
    sig_binned.resize(n_bins * samples_per_bin, n_channels)
    sig_binned = sig_binned.reshape(n_bins, samples_per_bin, n_channels)

    # rectify and integrate over each bin
    rauc = np.trapz(np.abs(sig_binned), dx=signal.sampling_period, axis=1)

    if n_bins == 1:
        # return a single value for each channel
        return rauc.squeeze()

    else:
        # return an AnalogSignal with times corresponding to center of each bin
        rauc_sig = neo.AnalogSignal(
            rauc,
            t_start=signal.t_start.rescale(bin_duration.units)+bin_duration/2,
            sampling_period=bin_duration)
        return rauc_sig
Example #13
def main_examples(ntrains):

    alphas, threshes, ks, snrs, freqs, t, base_frequency, \
      train_freq_hists, example_trains, example_signals, waves, peaks = get_results(ntrains)

    # remove the alpha=0.5, doesn't really add anything
    alphas = alphas[1:]
    waves = waves[1:, ...]
    peaks = peaks[1:, ...]
    example_trains = example_trains[1:, ...]
    example_signals = example_signals[1:, ...]
    train_freq_hists = train_freq_hists[1:, ...]

    nalpha, nsnr, ntrains, nthresh, nfreq = peaks.shape
    nks = len(ks)
    log2_freqs = np.log2(freqs)

    np.random.seed(2002171330)
    bootci_kwargs = dict(statfunc=lambda _x: np.nanmean(_x, axis=0),
                         alpha=0.05,
                         n_samples=1000)

    snr_example_idxs = [0]
    for s in [0.1, 0.3, 1, 3]:
        snr_example_idxs.append(np.argmin(np.abs(snrs - s)))

    log2_freq_edges = utils.make_edges(log2_freqs)
    freq_ticks = [1, 2, 4, 8, 16, 32, 64]
    freq_ticklabels = freq_ticks

    wave_kwargs = [
        dict(facecolor=c, edgecolor=c, alpha=0.5, zorder=100 - ci)
        for ci, c in enumerate(['0', '0.3', '0.5'])
    ]
    peak_kwargs = [
        dict(facecolor=c, edgecolor=c, alpha=0.5, zorder=90 - ci) for ci, c in
        enumerate(plt.rcParams['axes.prop_cycle'].by_key()['color'][:nthresh])
    ]

    # axes widths and x-positions (indexed from left)
    left_margin = 0.5
    right_margin = 0.1
    column_margin = 0.15
    width_ratios = [left_margin] + reduce(
        (lambda a, b: a + [column_margin] + b),
        [[1]] * nalpha) + [right_margin]
    xs, ws = ratios_to_pos_and_size(width_ratios)

    # axes heights and y-positions (indexed from bottom)
    top_margin = 0.8
    bottom_margin = 0.6
    height_ratios = [[1.0]] * len(snr_example_idxs) + [[1.5], [0.5], [0.5]]
    height_ratios = [bottom_margin] + reduce(
        (lambda a, b: a + [0.1] + b), height_ratios) + [top_margin]
    height_ratios[-3] = 0.8
    height_ratios[-5] = 0.6
    height_ratios[-7] = 1.1
    ys, hs = ratios_to_pos_and_size(height_ratios)

    fig = plt.figure(figsize=(9, 12))
    fig.text(0.5,
             0.98,
             'Exploration of noise and irregularity',
             fontsize=16,
             ha='center',
             va='center')
    for ai, alpha in enumerate(alphas):

        alpha_value_label = f'{alpha}'.rstrip('0').rstrip('.')
        if alpha > 1:
            sigma_value_label = f'1/{alpha_value_label}'
        else:
            sigma_value_label = f'{1/alpha}'.rstrip('0').rstrip('.')
        print(f'alpha = {alpha_value_label} ({ai+1}/{nalpha})')

        # -----------------------------------------------------------------
        #   1/ISI histogram

        print('  histogram')

        # plot
        ax_isi = fig.add_subplot(position=[xs[ai], ys[-1], ws[ai], hs[-1]])
        ax_isi.bar(log2_freq_edges[:-1],
                   train_freq_hists[ai],
                   width=np.diff(log2_freq_edges),
                   align='edge',
                   color='k')

        # configure axes
        ax_isi.set_xticks([], minor=True)
        ax_isi.set_xticks(np.log2(freq_ticks))
        ax_isi.set_xticklabels(freq_ticklabels)
        ax_isi.set_title(f'$\\sigma$ = {sigma_value_label}')
        for spine in ['left', 'top', 'right']:
            ax_isi.spines[spine].set_visible(False)
        ax_isi.set_yticks([])
        if ai == 0:
            ax_isi.set_ylabel('True\ndistribution', labelpad=10)
        ax_isi.set_xlabel('Frequency (ISI$^{-1}$)')
        ax_isi.set_xlim([log2_freq_edges[0], log2_freq_edges[-1]])

        # -----------------------------------------------------------------
        #   Raster and signal examples

        print('  raster and signal examples')

        # plot raster
        ax_r = fig.add_subplot(position=[xs[ai], ys[-2], ws[ai], hs[-2]])
        for i in range(example_trains.shape[1]):
            train = example_trains[ai, i]
            spikes = t[np.where(train > 0)[0]]
            ax_r.scatter(spikes, [i] * len(spikes), marker='.', color='k', s=1)

        # plot noisy signal
        ax_s = fig.add_subplot(position=[xs[ai], ys[-3], ws[ai], hs[-3]])
        for y, snri in enumerate(snr_example_idxs):
            signal = example_signals[ai, snri]
            signal = (signal - signal.mean()) / (
                0.3 + signal.std())  # weird scaling for visual aesthetics
            ax_s.plot(t, signal + 4 * y, c='k', lw=1)

        # configure axes
        if ai == 0:
            ax_r.set_ylabel('True\nraster', labelpad=10)
            ax_s.set_ylabel('SNR examples')
        for ax in [ax_r, ax_s]:
            ax.set_xlim([0, 1])
            for spine in ['left', 'top', 'right']:
                ax.spines[spine].set_visible(False)
            ax.set_yticks([])
            ax.set_xlabel('Time')
            ax.set_xticks([0, 0.25, 0.5, 0.75, 1.0])
            ax.set_xticklabels([f'{tick:g}' for tick in ax.get_xticks()])
        if ai == 0:
            ax_s.set_yticks(4 * np.arange(len(snr_example_idxs)))
            ax_s.set_yticklabels([f'{s:.2g}' for s in snrs[snr_example_idxs]])
            ax_s.yaxis.set_tick_params(length=0)

        # -----------------------------------------------------------------
        #   Wavelet and peak examples

        print('  wavelet and peak examples')

        for i, snr_idx in enumerate(snr_example_idxs):
            ax = fig.add_subplot(position=[xs[ai], ys[i], ws[ai], hs[i]])

            snr_label = f'{snrs[snr_idx]:.2g}'
            print(
                f'    example snr {snr_label} ({i+1}/{len(snr_example_idxs)})')

            # nalpha, nsnr, ntrains, nthresh, nfreq = peaks.shape
            # nalpha, nsnr, ntrains, nks    , nfreq = waves.shape

            # plot wavelet
            for j in range(nks):
                y = waves[ai, snr_idx, :, j]
                y = y / np.sum(y, axis=-1, keepdims=True)
                ci = np.sqrt(bootci_pi(y, **bootci_kwargs))
                ax.fill_between(log2_freqs,
                                ci[0],
                                ci[1],
                                label=f'Mesaclip ($k$={ks[j]})',
                                **wave_kwargs[j])

            # plot peak
            for j in range(nthresh):
                y = peaks[ai, snr_idx, :, j]
                y = y / np.sum(y, axis=-1, keepdims=True)
                ci = np.sqrt(bootci_pi(y, **bootci_kwargs))
                ax.fill_between(log2_freqs,
                                ci[0],
                                ci[1],
                                label=f'Peak ($\\theta$={threshes[j]})',
                                **peak_kwargs[j])

            # configure axes
            ax.set_yticks([])
            ax.set_xticks([])
            ax.set_xlim(log2_freq_edges[0], log2_freq_edges[-1])
            if ai == 0:
                ax.set_ylabel(f'SNR\n{snr_label}', fontsize=10, labelpad=10)
            if i == 0:
                ax.set_xticks(np.log2(freq_ticks))
                ax.set_xticklabels(freq_ticklabels)
                ax.set_xlabel('Frequency')

        if ai == 0:
            handles, labels = ax.get_legend_handles_labels()
            handles = list(np.array(handles).reshape(2, -1).T.flatten())
            labels = list(np.array(labels).reshape(2, -1).T.flatten())
            ax.legend(handles,
                      labels,
                      loc='upper left',
                      ncol=3,
                      bbox_to_anchor=(-0.05, 1.6))

    fig.savefig('../output/snr_vs_peak_detect_examples.png', dpi=600)
    plt.close(fig)
Example #14
def CNR(img,
        croi_signal=[],
        croi_background=[],
        froi_signal=[],
        froi_background=[]):
    """
	This function computes the Contrast-to-Noise Ratio (CNR) as reported in
	the equation (2.7) of [1]_.
	The ROI of the signal and the background can be defined using two lists of
	coordinates or two ImageJ .roi files.

	Parameters
	----------
	img :  2d array
		The array representing the image.

	croi_signal : list
		List that contains the coordinates of the signal ROI: [rowmin, rowmax, colmin, colmax].

	croi_background : list
		List that contains the coordinates of the background ROI: [rowmin, rowmax, colmin, colmax].

	froi_signal : string
		Path of the imagej file containing the rectangular ROI of the signal.

	froi_background : string
		Path of the imagej file containing the rectangular ROI of the background.

	Returns
	-------
	CNR : float
		The CNR value computed using the ROIs given.

	References
	----------
	.. [1] D. Micieli et al., A comparative analysis of reconstruction methods
	 applied to Neutron Tomography, Journal of Instrumentation, Volume 13,
	 June 2018.
	"""

    if (img.ndim != 2):
        raise ValueError("The input array must have 2 dimensions.")

    if (croi_signal and froi_signal):
        raise ValueError(
            "Only one method to define the ROI is accepted. Please pass croi_singal or froi_signal."
        )

    if (croi_background and froi_background):
        raise ValueError(
            "Only one method to define the ROI is accepted. Please pass croi_background or froi_background."
        )

    if (croi_signal):
        rowmin, rowmax, colmin, colmax = croi_signal
    if (froi_signal):
        rowmin, rowmax, colmin, colmax = get_rect_coordinates_from_roi(
            froi_signal)

    signal = img[rowmin:(rowmax + 1), colmin:(colmax + 1)]

    if (croi_background):
        rowmin, rowmax, colmin, colmax = croi_background
    elif (froi_background):
        rowmin, rowmax, colmin, colmax = get_rect_coordinates_from_roi(
            froi_background)

    background = img[rowmin:(rowmax + 1), colmin:(colmax + 1)]

    cnr_val = (signal.mean() - background.mean()) / background.std()

    return cnr_val
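A short usage sketch on synthetic data, with a bright square as the signal ROI and a corner patch as the background; coordinates follow the [rowmin, rowmax, colmin, colmax] convention documented above:
import numpy as np
img = np.random.normal(loc=0.0, scale=0.1, size=(128, 128))
img[40:60, 40:60] += 2.0  # bright square acting as the "signal"
cnr_val = CNR(img, croi_signal=[40, 59, 40, 59], croi_background=[0, 19, 0, 19])
print(cnr_val)  # large positive value, since the square sits well above the noise floor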