Example #1
 def test_nextpow2(self):
     """Test the nextpow2 function."""
     self.assertEqual(utils.nextpow2(2), 1)
     self.assertEqual(utils.nextpow2(17), 5)
     import warnings
     with warnings.catch_warnings(record=True) as catcher:
         utils.nextpow2(-3)
         self.assertEqual(len(catcher), 1)
         self.assertTrue(issubclass(catcher[-1].category, RuntimeWarning))
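The assertions above pin down what nextpow2 is expected to do: return the exponent p such that 2 ** p >= n (so nextpow2(2) == 1 and nextpow2(17) == 5), and emit a single RuntimeWarning for negative input rather than raising. A minimal sketch consistent with those checks (illustrative only; the library's utils.nextpow2 may be implemented differently):

import warnings
import numpy as np

def nextpow2(n):
    # Smallest integer p with 2 ** p >= |n|; warn instead of failing on
    # negative input, matching the test above.
    if n < 0:
        warnings.warn("n should be positive", RuntimeWarning)
        n = abs(n)
    if n == 0:
        return 0
    return int(np.ceil(np.log2(n)))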
Example #2
 def test_nextpow2(self):
     """Test the nextpow2 function."""
     self.assertEqual(utils.nextpow2(2), 1)
     self.assertEqual(utils.nextpow2(17), 5)
     import warnings
     with warnings.catch_warnings(record=True) as catcher:
         utils.nextpow2(-3)
         self.assertEqual(len(catcher), 1)
         self.assertTrue(issubclass(catcher[-1].category, RuntimeWarning))
Example #3
def noisecu(n_points):
    """Compute analytic complex uniform white noise.

    :param n_points: Length of the noise signal.
    :type n_points: int
    :return: analytic complex uniform white noise signal of length n_points
    :rtype: numpy.ndarray
    :Examples:
    >>> import numpy as np
    >>> noise = noisecu(512)
    >>> print("%.2f" % abs((noise ** 2).mean()))
    0.00
    >>> print("%.1f" % np.std(noise) ** 2)
    1.0
    >>> subplot(211), plot(real(noise))                                              #doctest: +SKIP
    >>> subplot(212), plot(linspace(-0.5, 0.5, 512), abs(fftshift(fft(noise))) ** 2) #doctest: +SKIP

    .. plot:: docstring_plots/generators/noise/noisecu.py
    """
    if n_points <= 2:
        noise = (np.random.rand(n_points, 1) - 0.5 + 1j * (np.random.rand(n_points, 1) - 0.5)) * np.sqrt(6)
    else:
        noise = np.random.rand(2 ** int(nextpow2(n_points)),) - 0.5
        noise = hilbert(noise) / noise.std() / np.sqrt(2)
        inds = noise.shape[0] - np.arange(n_points - 1, -1, step=-1) - 1
        noise = noise[inds]
    return noise
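The body of noisecu relies on names imported at module level in the original file, which the snippet does not show. A plausible set of imports that makes it runnable on its own (the module path of nextpow2 is an assumption):

import numpy as np
from scipy.signal import hilbert   # analytic-signal construction used above
from tftb.utils import nextpow2    # assumed location; any next-power-of-two helper works

noise = noisecu(512)
print(noise.shape, noise.dtype)    # (512,), complex128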
Example #4
def noisecg(n_points, a1=None, a2=None):
    """
    Generate analytic complex gaussian noise with mean 0.0 and variance 1.0.

    :param n_points: Length of the desired output signal.
    :param a1:
        Coefficients of the filter through which the noise is passed.
    :param a2:
        Coefficients of the filter through which the noise is passed.
    :type n_points: int
    :type a1: float
    :type a2: float
    :return: Analytic complex Gaussian noise of length n_points.
    :rtype: numpy.ndarray
    :Examples:
    >>> import numpy as np
    >>> noise = noisecg(512)
    >>> print("%.2f" % abs((noise ** 2).mean()))
    0.00
    >>> print("%.1f" % np.std(noise) ** 2)
    1.0
    >>> subplot(211), plot(real(noise))                                              #doctest: +SKIP
    >>> subplot(212), plot(linspace(-0.5, 0.5, 512), abs(fftshift(fft(noise))) ** 2) #doctest: +SKIP

    .. plot:: docstring_plots/generators/noise/noisecg.py
    """
    assert n_points > 0
    if n_points <= 2:
        noise = (np.random.randn(n_points, 1) + 1j * np.random.randn(n_points, 1)) / np.sqrt(2.)
    else:
        noise = np.random.normal(size=2 ** int(nextpow2(n_points)))
        noise = hilbert(noise) / noise.std() / np.sqrt(2.)
        noise = noise[len(noise) - np.arange(n_points - 1, -1, -1) - 1]
    return noise
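A quick way to check the "analytic" claim in the docstring is to look at the spectrum: an analytic signal carries (nearly) all of its energy on non-negative frequencies. A small sanity check, assuming noisecg and its imports are available as in the previous example:

import numpy as np

noise = noisecg(512)
spectrum = np.abs(np.fft.fft(noise)) ** 2
neg_energy = spectrum[257:].sum()     # strictly negative frequency bins for N = 512
print(neg_energy / spectrum.sum())    # close to 0 for an analytic signal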
Example #5
def noisecu(n_points):
    """Compute analytic complex uniform white noise.

    :param n_points: Length of the noise signal.
    :type n_points: int
    :return: analytic complex uniform white noise signal of length n_points
    :rtype: numpy.ndarray
    :Examples:
    >>> import numpy as np
    >>> noise = noisecu(512)
    >>> print("%.2f" % abs((noise ** 2).mean()))
    0.00
    >>> print("%.1f" % np.std(noise) ** 2)
    1.0
    >>> subplot(211), plot(real(noise))                                              #doctest: +SKIP
    >>> subplot(212), plot(linspace(-0.5, 0.5, 512), abs(fftshift(fft(noise))) ** 2) #doctest: +SKIP

    .. plot:: docstring_plots/generators/noise/noisecu.py
    """
    if n_points <= 2:
        noise = (np.random.rand(n_points, 1) - 0.5 + 1j *
                 (np.random.rand(n_points, 1) - 0.5)) * np.sqrt(6)
    else:
        noise = np.random.rand(2 ** int(nextpow2(n_points)), ) - 0.5
        noise = hilbert(noise) / noise.std() / np.sqrt(2)
        inds = noise.shape[0] - np.arange(n_points - 1, -1, step=-1) - 1
        noise = noise[inds]
    return noise
Example #6
def noisecg(n_points, a1=None, a2=None):
    """
    Generate analytic complex gaussian noise with mean 0.0 and variance 1.0.

    :param n_points: Length of the desired output signal.
    :param a1:
        Coefficients of the filter through which the noise is passed.
    :param a2:
        Coefficients of the filter through which the noise is passed.
    :type n_points: int
    :type a1: float
    :type a2: float
    :return: Analytic complex Gaussian noise of length n_points.
    :rtype: numpy.ndarray
    :Examples:
    >>> import numpy as np
    >>> noise = noisecg(512)
    >>> print("%.2f" % abs((noise ** 2).mean()))
    0.00
    >>> print("%.1f" % np.std(noise) ** 2)
    1.0
    >>> subplot(211), plot(real(noise))                                              #doctest: +SKIP
    >>> subplot(212), plot(linspace(-0.5, 0.5, 512), abs(fftshift(fft(noise))) ** 2) #doctest: +SKIP

    .. plot:: docstring_plots/generators/noise/noisecg.py
    """
    assert n_points > 0
    if n_points <= 2:
        noise = (np.random.randn(n_points, 1) +
                 1j * np.random.randn(n_points, 1)) / np.sqrt(2)
    else:
        noise = np.random.normal(size=(2 ** int(nextpow2(n_points)), ))
        noise = hilbert(noise) / noise.std() / np.sqrt(2)
        noise = noise[len(noise) - np.arange(n_points - 1, -1, -1) - 1]
    return noise
Example #7
def noisecu(n_points):
    """Compute analytic complex uniform white noise.

    :param n_points: Length of the noise signal.
    :type n_points: int
    :return: analytic complex uniform white noise signal of length n_points
    :rtype: numpy.ndarray
    :Examples:
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> noise = noisecu(512)
    >>> print("%.2f" % abs((noise ** 2).mean()))
    0.00
    >>> print("%.1f" % np.std(noise) ** 2)
    1.0
    >>> plt.subplot(211), plt.plot(real(noise))                                #doctest: +SKIP
    >>> plt.subplot(212), plt.plot(linspace(-0.5, 0.5, 512), abs(fftshift(fft(noise))) ** 2)  #doctest: +SKIP

    .. plot:: docstring_plots/generators/noise/noisecu.py
    """
    if n_points <= 2:
        noise = (np.random.rand(n_points, 1) - 0.5 + 1j * (np.random.rand(n_points, 1) - 0.5)) * \
            np.sqrt(6)
    else:
        noise = np.random.rand(2**int(nextpow2(n_points)), ) - 0.5
        noise = hilbert(noise) / noise.std() / np.sqrt(2)
        inds = noise.shape[0] - np.arange(n_points - 1, -1, step=-1) - 1
        noise = noise[inds]
    return noise
Example #8
 def _get_nvoices(self):
     q = (self.bw * self.T * (1 + 2 / self.R) * np.log(
         (1 + self.R / 2) / (1 - self.R / 2)))
     nq = np.ceil(q / 2)
     nmin = nq - nq % 2
     ndflt = 2**nextpow2(nmin)
     self.n_voices = int(ndflt)
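This is the default-voices rule that reappears in several snippets below: take the next power of two above Nq = ceil(B * T * (1 + 2 / R) * ln((1 + R / 2) / (1 - R / 2)) / 2), where B is the analyzed bandwidth, T the duration and R the relative bandwidth. A standalone sketch with illustrative numbers (the function name is not from the library):

import numpy as np

def default_n_voices(bw, T, R):
    # Next power of two above the Nq bound computed in _get_nvoices above.
    nq = np.ceil(bw * T * (1 + 2.0 / R) * np.log((1 + R / 2.0) / (1 - R / 2.0)) / 2.0)
    nmin = nq - nq % 2
    return int(2 ** int(np.ceil(np.log2(nmin))))

print(default_n_voices(0.4, 127, 1.0))   # nq = 84 for these values, so this prints 128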
Example #9
def wide_band(signal, fmin=None, fmax=None, N=None):
    if 1 in signal.shape:
        signal = signal.ravel()
    elif signal.ndim != 1:
        raise ValueError("The input signal should be one dimensional.")
    s_ana = hilbert(np.real(signal))
    nx = signal.shape[0]
    m = int(np.round(nx / 2.0))
    t = np.arange(nx) - m
    tmin = 0
    tmax = nx - 1
    T = tmax - tmin

    # determine default values for fmin, fmax
    if (fmin is None) or (fmax is None):
        # matplotlib.mlab.find has been removed; boolean indexing via
        # np.nonzero yields the same indices.
        STF = np.fft.fftshift(s_ana)
        sp = np.abs(STF[:m]) ** 2
        maxsp = np.amax(sp)
        f = np.linspace(0, 0.5, m + 1)
        f = f[:m]
        indmin = np.nonzero(sp > maxsp / 100.0)[0].min()
        indmax = np.nonzero(sp > maxsp / 100.0)[0].max()
        if fmin is None:
            fmin = max([0.01, 0.05 * np.fix(f[indmin] / 0.05)])
        if fmax is None:
            fmax = 0.05 * np.ceil(f[indmax] / 0.05)
    B = fmax - fmin
    R = B / ((fmin + fmax) / 2.0)
    nq = np.ceil((B * T * (1 + 2.0 / R) * np.log((1 + R / 2.0) / (1 - R / 2.0))) / 2.0)
    nmin = nq - (nq % 2)
    if N is None:
        N = int(2 ** (nextpow2(nmin)))

    # geometric sampling for the analyzed spectrum
    k = np.arange(1, N + 1)
    q = (fmax / fmin) ** (1.0 / (N - 1))
    geo_f = fmin * (np.exp((k - 1) * np.log(q)))
    tfmatx = -2j * np.dot(t.reshape(-1, 1), geo_f.reshape(1, -1)) * np.pi
    tfmatx = np.exp(tfmatx)
    S = np.dot(s_ana.reshape(1, -1), tfmatx)
    S = np.tile(S, (nx, 1))
    Sb = S * tfmatx

    tau = t
    S = np.c_[S, np.zeros((nx, N))].T
    Sb = np.c_[Sb, np.zeros((nx, N))].T

    # Mellin transform computation of the analyzed signal
    p = np.arange(2 * N)
    coef = np.exp(p / 2.0 * np.log(q))
    mellinS = np.fft.fftshift(np.fft.ifft(S[:, 0] * coef))
    mellinS = np.tile(mellinS, (nx, 1)).T

    mellinSb = np.zeros((2 * N, nx), dtype=complex)
    for i in range(nx):
        mellinSb[:, i] = np.fft.fftshift(np.fft.ifft(Sb[:, i] * coef))

    k = np.arange(1, 2 * N + 1)
    scale = np.logspace(np.log10(fmin / fmax), np.log10(fmax / fmin), N)
    theta = np.log(scale)
    mellinSSb = mellinS * np.conj(mellinSb)

    waf = np.fft.ifft(mellinSSb, N, axis=0)
    no2 = int((N + N % 2) / 2.0)
    waf = np.r_[waf[no2:(N + 1), :], waf[:no2, :]]

    # normalization
    s = np.real(s_ana)
    SP = np.fft.fft(hilbert(s))
    indmin = int(1 + np.round(fmin * (nx - 2)))
    indmax = int(1 + np.round(fmax * (nx - 2)))
    sp_ana = SP[(indmin - 1):indmax]
    waf *= (np.linalg.norm(sp_ana) ** 2) / waf[no2 - 1, m - 1] / N

    return waf, tau, theta
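A usage sketch for wide_band, assuming its module-level imports (numpy as np, scipy.signal.hilbert and a nextpow2 helper) are in place; the test signal and frequency bounds are purely illustrative:

import numpy as np

sig = np.cos(2 * np.pi * 0.25 * np.arange(128))    # toy signal at normalized frequency 0.25
waf, tau, theta = wide_band(sig, fmin=0.05, fmax=0.45)
print(waf.shape)                                   # (N, len(sig)), with N the power of two chosen above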
Example #10
def bertrand(signal, timestamps=None, fmin=None, fmax=None, n_voices=None):
    """bertrand

    :param signal:
    :param timestamps:
    :param fmin:
    :param fmax:
    :param n_voices:
    :type signal:
    :type timestamps:
    :type fmin:
    :type fmax:
    :type n_voices:
:return:
:rtype:
    """
    xrow = signal.shape[0]
    if timestamps is None:
        timestamps = np.arange(xrow)

    tcol = timestamps.shape[0]
    x1 = signal.copy()
    x2 = signal.copy()

    s1 = np.real(x1)
    s2 = np.real(x2)
    m = (xrow + (xrow % 2)) // 2
    t = np.arange(xrow) - m - 1
    tmin = 1
    tmax = xrow
    T = tmax - tmin
    mt = xrow

    if (fmin is None) or (fmax is None):
        stf1 = np.fft.fft(
            np.fft.fftshift(s1[timestamps.min():timestamps.max() + 1]))
        stf2 = np.fft.fft(
            np.fft.fftshift(s2[timestamps.min():timestamps.max() + 1]))
        nstf = stf1.shape[0]
        sp1 = np.abs(stf1[:int(np.round(nstf / 2.0))])**2
        sp2 = np.abs(stf2[:int(np.round(nstf / 2.0))])**2
        maxsp1 = sp1.max()
        maxsp2 = sp2.max()
        f = np.linspace(0, 0.5, int(np.round(nstf / 2.0)) + 1)
        if fmin is None:
            mask = sp1 > maxsp1 / 100.0
            indmin = np.arange(mask.shape[0],
                               dtype=int)[mask.astype(bool)].min()
            fmin = max([0.01, 0.05 * np.floor(f[indmin] / 0.05)])
        if fmax is None:
            mask = sp2 > maxsp2 / 100.0
            indmax = np.arange(mask.shape[0],
                               dtype=int)[mask.astype(bool)].max()
            fmax = 0.05 * np.ceil(f[indmax] / 0.05)
    bw = fmax - fmin
    R = bw / (fmin + fmax) * 2.0
    umaxbert = lambda x: np.exp(x) - fmax / fmin
    umax = brenth(umaxbert, 0, 4)
    teq = m / (fmax * umax)
    if teq < mt:
        m0 = int(np.round((2 * m**2) / teq - m)) + 1
        m1 = m + m0
        T = 2 * m1 - 1
    else:
        m0 = 0
        m1 = m

    if n_voices is None:
        nq = np.ceil((bw * T * (1 + 2.0 / R) * np.log(
            (1 + R / 2.0) / (1 - R / 2.0))) / 2)
        nmin = nq - nq % 2
        ndflt = 2**nextpow2(nmin)
        n_voices = int(ndflt)

    # Geometric sampling for the analyzed spectrum
    k = np.arange(1, n_voices + 1)
    q = (fmax / fmin)**(1 / (n_voices - 1.0))
    t = np.arange(1, mt + 1) - m - 1
    geo_f = fmin * np.exp((k - 1) * np.log(q))
    tfmatx = np.exp(
        -2 * 1j *
        np.dot(t.reshape(t.shape[0], 1), geo_f.reshape(1, geo_f.shape[0])) *
        np.pi)
    S1 = np.dot(s1.reshape(1, s1.shape[0]), tfmatx)
    S2 = np.dot(s2.reshape(1, s2.shape[0]), tfmatx)
    S1 = np.append(S1, np.zeros((n_voices, )))
    S2 = np.append(S2, np.zeros((n_voices, )))

    # Mellin transform of the signal
    p = np.arange(2 * n_voices)
    mellin1 = np.fft.fftshift(np.fft.ifft(S1))
    mellin2 = np.fft.fftshift(np.fft.ifft(S2))
    umin = -umax
    du = np.abs(umax - umin) / (2 * m1)
    u = np.linspace(umin, umax - du, 2 * m1)
    u[m1] = 0
    beta = (p / float(n_voices) - 1) / (2 * np.log(q))

    # Computation of P0(tf, f)
    waf = np.zeros((2 * m1, n_voices), dtype=complex)
    for n in np.hstack((np.arange(1, m1 + 1), np.arange(m1 + 2, 2 * m1 + 1))):
        mx1 = np.exp((-2 * 1j * np.pi * beta + 0.5) * np.log(
            (u[n - 1] / 2) * np.exp(-u[n - 1] / 2.0) /
            np.sinh(u[n - 1] / 2))) * mellin1
        mx2 = np.exp((-2 * 1j * np.pi * beta + 0.5) * np.log(
            (u[n - 1] / 2) * np.exp(u[n - 1] / 2.0) /
            np.sinh(u[n - 1] / 2))) * mellin2
        fx1 = np.fft.fft(np.fft.fftshift(mx1))[:n_voices]
        fx2 = np.fft.fft(np.fft.fftshift(mx2))[:n_voices]
        waf[n - 1, :] = fx1 * np.conj(fx2)
    waf[m1, :] = S1[:n_voices] * np.conj(S2[:n_voices])
    waf = np.vstack((waf[m1:(2 * m1), :], waf[:m1, :]))
    waf *= np.repeat(geo_f.reshape((1, geo_f.shape[0])), 2 * m1, axis=0)
    tffr = np.fft.ifft(waf, axis=0)
    tffr = np.real(
        np.rot90(np.vstack((tffr[m1:(2 * m1 + 1), :], tffr[:m1, :])), k=-1))
    # conversion from tff to tf using 1d interpolation
    tfr = np.zeros((n_voices, tcol))
    ts2 = (mt - 1.0) / 2
    gamma = np.linspace(-geo_f[n_voices - 1] * ts2, geo_f[n_voices - 1] * ts2,
                        2 * m1)
    for i in range(n_voices):
        ind = np.nonzero(np.logical_and(gamma >= -geo_f[i] * ts2,
                                        gamma <= geo_f[i] * ts2))[0]
        x = gamma[ind]
        y = tffr[i, ind]
        xi = (timestamps - ts2 - 1) * geo_f[i]
        tck = splrep(x, y)
        tfr[i, :] = splev(xi, tck).ravel()
    t = timestamps
    f = geo_f.ravel()

    # Normalization
    SP1 = np.fft.fft(hilbert(s1), axis=0)
    SP2 = np.fft.fft(hilbert(s2), axis=0)
    indmin = 1 + int(np.round(fmin * (tcol - 2)))
    indmax = 1 + int(np.round(fmax * (tcol - 2)))
    sp1_ana = SP1[(indmin - 1):indmax]
    sp2_ana = SP2[(indmin - 1):indmax]

    tfr = tfr * np.dot(sp1_ana.T, sp2_ana) / integrate_2d(tfr, t, f) / n_voices
    return tfr, t, f
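The bertrand snippet also leans on several helpers imported at module level in the original file. A plausible import block (the package-internal helpers are assumptions, since the module header is not shown):

import numpy as np
from scipy.optimize import brenth            # root finding for umax
from scipy.signal import hilbert             # analytic signal in the normalization step
from scipy.interpolate import splrep, splev  # spline interpolation from tff to tf

# nextpow2 and integrate_2d are utilities from the same package in the
# original source; their exact import path is not shown in the snippet.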
Example #11
def smoothed_pseudo_wigner(signal,
                           timestamps=None,
                           K='bertrand',
                           nh0=None,
                           ng0=0,
                           fmin=None,
                           fmax=None,
                           n_voices=None):
    """smoothed_pseudo_wigner

    :param signal:
    :param timestamps:
    :param K:
    :param nh0:
    :param ng0:
    :param fmin:
    :param fmax:
    :param n_voices:
    :type signal:
    :type timestamps:
    :type K:
    :type nh0:
    :type ng0:
    :type fmin:
    :type fmax:
    :type n_voices:
:return:
:rtype:
    """
    xrow = signal.shape[0]
    if timestamps is None:
        timestamps = np.arange(signal.shape[0])
    if nh0 is None:
        nh0 = np.sqrt(signal.shape[0])

    tcol = timestamps.shape[0]
    mt = signal.shape[0]

    x1 = x2 = signal.copy()
    s1 = np.real(x1)
    s2 = np.real(x2)
    m = (mt + np.remainder(mt, 2.0)) / 2.0

    if (fmin is None) or (fmax is None):
        stf1 = np.fft.fft(
            np.fft.fftshift(s1[timestamps.min():timestamps.max() + 1]))
        stf2 = np.fft.fft(
            np.fft.fftshift(s2[timestamps.min():timestamps.max() + 1]))
        nstf = stf1.shape[0]
        sp1 = np.abs(stf1[:int(np.round(nstf / 2.0))])**2
        sp2 = np.abs(stf2[:int(np.round(nstf / 2.0))])**2
        maxsp1 = sp1.max()
        maxsp2 = sp2.max()
        f = np.linspace(0, 0.5, int(np.round(nstf / 2.0)) + 1)
        f = f[:int(np.round(nstf / 2.0))]
        if fmin is None:
            mask = sp1 > maxsp1 / 100.0
            indmin = np.arange(mask.shape[0],
                               dtype=int)[mask.astype(bool)].min()
            fmin = max([0.01, 0.05 * np.floor(f[indmin] / 0.05)])
        if fmax is None:
            mask = sp2 > maxsp2 / 100.0
            indmax = np.arange(mask.shape[0],
                               dtype=int)[mask.astype(bool)].max()
            fmax = 0.05 * np.ceil(f[indmax] / 0.05)

    B = fmax - fmin
    R = B / ((fmin + fmax) / 2.0)
    ratio = fmax / fmin
    umax = np.log(ratio)
    teq = nh0 / (fmax * umax)
    if teq > 2 * nh0:
        m0 = (2 * nh0**2) / teq - nh0 + 1
    else:
        m0 = 0
    mu = int(np.round(nh0 + m0))
    T = 2 * mu - 1

    if n_voices is None:
        nq = np.ceil((B * T * (1 + 2.0 / R) * np.log(
            (1 + R / 2.0) / (1 - R / 2.0))) / 2)
        nmin = nq - nq % 2
        ndflt = 2**nextpow2(nmin)
        n_voices = int(ndflt)

    k = np.arange(1, n_voices + 1)
    q = ratio**(1.0 / (n_voices - 1))
    a = np.exp((k - 1) * np.log(q))
    geo_f = fmin * a

    # Wavelet decomposition computation
    matxte1 = np.zeros((n_voices, tcol), dtype=complex)
    matxte2 = np.zeros((n_voices, tcol), dtype=complex)
    _, _, _, wt1 = scalogram(s1,
                             time_instants=timestamps,
                             waveparams=nh0,
                             fmin=fmin,
                             fmax=fmax,
                             n_voices=n_voices)
    _, _, _, wt2 = scalogram(s2,
                             time_instants=timestamps,
                             waveparams=nh0,
                             fmin=fmin,
                             fmax=fmax,
                             n_voices=n_voices)
    for ptr in range(n_voices):
        matxte1[ptr, :] = wt1[ptr, :] * np.sqrt(a[n_voices - ptr - 1])
        matxte2[ptr, :] = wt2[ptr, :] * np.sqrt(a[n_voices - ptr - 1])

    umin = -umax
    u = np.linspace(umin, umax, 2 * mu + 1)
    u = u[:(2 * mu)]
    u[mu] = 0
    p = np.arange(2 * n_voices)
    beta = (p / float(n_voices) - 1.0) / (2 * np.log(q))
    l1 = np.zeros((2 * mu, 2 * n_voices), dtype=complex)
    l2 = np.zeros((2 * mu, 2 * n_voices), dtype=complex)
    for m in range(l1.shape[0]):
        l1[m, :] = np.exp(-2 * np.pi * 1j * beta * np.log(lambdak(u[m], K)))
        l2[m, :] = np.exp(-2 * np.pi * 1j * beta * np.log(lambdak(-u[m], K)))

    # Calculate time smoothing window
    if ng0 == 0:
        G = np.ones((2 * mu))
    else:
        a_t = 3
        sigma_t = ng0 * fmax / np.sqrt(2 * a_t * np.log(10))
        a_u = 2 * np.pi**2 * sigma_t**2 * umax**2 / np.log(10)
        G = np.exp(-(a_u * np.log(10) / mu**2) * np.arange(-mu, mu)**2)

    waf = np.zeros((2 * mu, n_voices))
    tfr = np.zeros((n_voices, tcol))
    S1 = S2 = np.zeros((2 * n_voices, ), dtype=complex)
    mx1 = mx2 = np.zeros((2 * n_voices, 2 * mu))

    for ti in range(tcol):
        S1[:n_voices] = matxte1[:, ti]
        mellin1 = np.fft.fftshift(np.fft.ifft(S1))
        mx1 = l1 * mellin1.reshape(1, mellin1.shape[0]).repeat(2 * mu, 0)
        mx1 = np.fft.fft(mx1, axis=0)
        tx1 = mx1[:n_voices, :].T

        S2[:n_voices] = matxte2[:, ti]
        mellin2 = np.fft.fftshift(np.fft.ifft(S2))
        mx2 = l2 * mellin2.reshape(1, mellin2.shape[0]).repeat(2 * mu, 0)
        mx2 = np.fft.fft(mx2, axis=0)
        tx2 = mx2[:n_voices, :].T
        waf = np.real(tx1 * np.conj(tx2)) * G.reshape(G.shape[0], 1).repeat(
            n_voices, axis=1)
        tfr[:, ti] = np.sum(waf, axis=0) * geo_f

    t = timestamps
    f = geo_f

    # Normalization
    sp1 = np.fft.fft(hilbert(s1))
    sp2 = np.fft.fft(hilbert(s2))
    indmin = int(1 + np.round(fmin * (xrow - 2)))
    indmax = int(1 + np.round(fmax * (xrow - 2)))
    sp1_ana = sp1[indmin:(indmax + 1)]
    sp2_ana = sp2[indmin:(indmax + 1)]
    xx = np.dot(np.real(sp1_ana), np.real(sp2_ana))
    xx += np.dot(np.imag(sp1_ana), np.imag(sp2_ana))
    tfr = tfr * xx / integrate_2d(tfr, t, f) / n_voices
    return tfr, t, f
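Like bertrand, smoothed_pseudo_wigner depends on module-level imports that the snippet omits: numpy as np and scipy.signal.hilbert for the external pieces, plus package-internal helpers whose paths are not shown here and should be treated as assumptions:

import numpy as np
from scipy.signal import hilbert

# scalogram, lambdak, nextpow2 and integrate_2d come from the same package as
# smoothed_pseudo_wigner in the original source (import paths not shown).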
Example #12
def bertrand(signal, timestamps=None, fmin=None, fmax=None, n_voices=None):
    """bertrand

    :param signal:
    :param timestamps:
    :param fmin:
    :param fmax:
    :param n_voices:
    :type signal:
    :type timestamps:
    :type fmin:
    :type fmax:
    :type n_voices:
:return:
:rtype:
    """
    xrow = signal.shape[0]
    if timestamps is None:
        timestamps = np.arange(xrow)

    tcol = timestamps.shape[0]
    x1 = signal.copy()
    x2 = signal.copy()

    s1 = np.real(x1)
    s2 = np.real(x2)
    m = (xrow + (xrow % 2)) // 2
    t = np.arange(xrow) - m - 1
    tmin = 1
    tmax = xrow
    T = tmax - tmin
    mt = xrow

    if (fmin is None) or (fmax is None):
        stf1 = np.fft.fft(np.fft.fftshift(s1[timestamps.min():timestamps.max() + 1]))
        stf2 = np.fft.fft(np.fft.fftshift(s2[timestamps.min():timestamps.max() + 1]))
        nstf = stf1.shape[0]
        sp1 = np.abs(stf1[:int(np.round(nstf / 2.0))]) ** 2
        sp2 = np.abs(stf2[:int(np.round(nstf / 2.0))]) ** 2
        maxsp1 = sp1.max()
        maxsp2 = sp2.max()
        f = np.linspace(0, 0.5, int(np.round(nstf / 2.0)) + 1)
        if fmin is None:
            mask = sp1 > maxsp1 / 100.0
            indmin = np.arange(mask.shape[0], dtype=int)[mask.astype(bool)].min()
            fmin = max([0.01, 0.05 * np.floor(f[indmin] / 0.05)])
        if fmax is None:
            mask = sp2 > maxsp2 / 100.0
            indmax = np.arange(mask.shape[0], dtype=int)[mask.astype(bool)].max()
            fmax = 0.05 * np.ceil(f[indmax] / 0.05)
    bw = fmax - fmin
    R = bw / (fmin + fmax) * 2.0
    umaxbert = lambda x: np.exp(x) - fmax / fmin
    umax = brenth(umaxbert, 0, 4)
    teq = m / (fmax * umax)
    if teq < mt:
        m0 = int(np.round((2 * m ** 2) / teq - m)) + 1
        m1 = m + m0
        T = 2 * m1 - 1
    else:
        m0 = 0
        m1 = m

    if n_voices is None:
        nq = np.ceil((bw * T * (1 + 2.0 / R) * np.log((1 + R / 2.0) / (1 - R / 2.0))) / 2)
        nmin = nq - nq % 2
        ndflt = 2 ** nextpow2(nmin)
        n_voices = int(ndflt)

    # Geometric sampling for the analyzed spectrum
    k = np.arange(1, n_voices + 1)
    q = (fmax / fmin) ** (1 / (n_voices - 1.0))
    t = np.arange(1, mt + 1) - m - 1
    geo_f = fmin * np.exp((k - 1) * np.log(q))
    tfmatx = np.exp(-2 * 1j * np.dot(t.reshape(t.shape[0], 1),
                                     geo_f.reshape(1, geo_f.shape[0])) * np.pi)
    S1 = np.dot(s1.reshape(1, s1.shape[0]), tfmatx)
    S2 = np.dot(s2.reshape(1, s2.shape[0]), tfmatx)
    S1 = np.append(S1, np.zeros((n_voices,)))
    S2 = np.append(S2, np.zeros((n_voices,)))

    # Mellin transform of the signal
    p = np.arange(2 * n_voices)
    mellin1 = np.fft.fftshift(np.fft.ifft(S1))
    mellin2 = np.fft.fftshift(np.fft.ifft(S2))
    umin = -umax
    du = np.abs(umax - umin) / (2 * m1)
    u = np.linspace(umin, umax - du, 2 * m1)
    u[m1] = 0
    beta = (p / float(n_voices) - 1) / (2 * np.log(q))

    # Computation of P0(tf, f)
    waf = np.zeros((2 * m1, n_voices), dtype=complex)
    for n in np.hstack((np.arange(1, m1 + 1), np.arange(m1 + 2, 2 * m1 + 1))):
        mx1 = np.exp((-2 * 1j * np.pi * beta + 0.5) * np.log((u[n - 1] / 2) *
            np.exp(-u[n - 1] / 2.0) / np.sinh(u[n - 1] / 2))) * mellin1
        mx2 = np.exp((-2 * 1j * np.pi * beta + 0.5) * np.log((u[n - 1] / 2) *
            np.exp(u[n - 1] / 2.0) / np.sinh(u[n - 1] / 2))) * mellin2
        fx1 = np.fft.fft(np.fft.fftshift(mx1))[:n_voices]
        fx2 = np.fft.fft(np.fft.fftshift(mx2))[:n_voices]
        waf[n - 1, :] = fx1 * np.conj(fx2)
    waf[m1, :] = S1[:n_voices] * np.conj(S2[:n_voices])
    waf = np.vstack((waf[m1:(2 * m1), :], waf[:m1, :]))
    waf *= np.repeat(geo_f.reshape((1, geo_f.shape[0])), 2 * m1, axis=0)
    tffr = np.fft.ifft(waf, axis=0)
    tffr = np.real(np.rot90(np.vstack((tffr[m1:(2 * m1 + 1), :],
                                       tffr[:m1, :])), k=-1))
    # conversion from tff to tf using 1d interpolation
    tfr = np.zeros((n_voices, tcol))
    ts2 = (mt - 1.0) / 2
    gamma = np.linspace(-geo_f[n_voices - 1] * ts2,
                        geo_f[n_voices - 1] * ts2, 2 * m1)
    for i in range(n_voices):
        ind = np.nonzero(np.logical_and(gamma >= -geo_f[i] * ts2,
                                        gamma <= geo_f[i] * ts2))[0]
        x = gamma[ind]
        y = tffr[i, ind]
        xi = (timestamps - ts2 - 1) * geo_f[i]
        tck = splrep(x, y)
        tfr[i, :] = splev(xi, tck).ravel()
    t = timestamps
    f = geo_f.ravel()

    # Normalization
    SP1 = np.fft.fft(hilbert(s1), axis=0)
    SP2 = np.fft.fft(hilbert(s2), axis=0)
    indmin = 1 + int(np.round(fmin * (tcol - 2)))
    indmax = 1 + int(np.round(fmax * (tcol - 2)))
    sp1_ana = SP1[(indmin - 1):indmax]
    sp2_ana = SP2[(indmin - 1):indmax]

    tfr = tfr * np.dot(sp1_ana.T, sp2_ana) / integrate_2d(tfr, t, f) / n_voices
    return tfr, t, f
Example #13
def smoothed_pseudo_wigner(signal, timestamps=None, K='bertrand', nh0=None,
        ng0=0, fmin=None, fmax=None, n_voices=None):
    """smoothed_pseudo_wigner

    :param signal:
    :param timestamps:
    :param K:
    :param nh0:
    :param ng0:
    :param fmin:
    :param fmax:
    :param n_voices:
    :type signal:
    :type timestamps:
    :type K:
    :type nh0:
    :type ng0:
    :type fmin:
    :type fmax:
    :type n_voices:
:return:
:rtype:
    """
    xrow = signal.shape[0]
    if timestamps is None:
        timestamps = np.arange(signal.shape[0])
    if nh0 is None:
        nh0 = np.sqrt(signal.shape[0])

    tcol = timestamps.shape[0]
    mt = signal.shape[0]

    x1 = x2 = signal.copy()
    s1 = np.real(x1)
    s2 = np.real(x2)
    m = (mt + np.remainder(mt, 2.0)) / 2.0

    if (fmin is None) or (fmax is None):
        stf1 = np.fft.fft(np.fft.fftshift(s1[timestamps.min():timestamps.max() + 1]))
        stf2 = np.fft.fft(np.fft.fftshift(s2[timestamps.min():timestamps.max() + 1]))
        nstf = stf1.shape[0]
        sp1 = np.abs(stf1[:int(np.round(nstf / 2.0))]) ** 2
        sp2 = np.abs(stf2[:int(np.round(nstf / 2.0))]) ** 2
        maxsp1 = sp1.max()
        maxsp2 = sp2.max()
        f = np.linspace(0, 0.5, int(np.round(nstf / 2.0)) + 1)[:int(np.round(nstf / 2.0))]
        if fmin is None:
            mask = sp1 > maxsp1 / 100.0
            indmin = np.arange(mask.shape[0], dtype=int)[mask.astype(bool)].min()
            fmin = max([0.01, 0.05 * np.floor(f[indmin] / 0.05)])
        if fmax is None:
            mask = sp2 > maxsp2 / 100.0
            indmax = np.arange(mask.shape[0], dtype=int)[mask.astype(bool)].max()
            fmax = 0.05 * np.ceil(f[indmax] / 0.05)

    B = fmax - fmin
    R = B / ((fmin + fmax) / 2.0)
    ratio = fmax / fmin
    umax = np.log(ratio)
    teq = nh0 / (fmax * umax)
    if teq > 2 * nh0:
        m0 = (2 * nh0 ** 2) / teq - nh0 + 1
    else:
        m0 = 0
    mu = int(np.round(nh0 + m0))
    T = 2 * mu - 1

    if n_voices is None:
        nq = np.ceil((B * T * (1 + 2.0 / R) * np.log((1 + R / 2.0) / (1 - R / 2.0))) / 2)
        nmin = nq - nq % 2
        ndflt = 2 ** nextpow2(nmin)
        n_voices = int(ndflt)

    k = np.arange(1, n_voices + 1)
    q = ratio ** (1.0 / (n_voices - 1))
    a = np.exp((k - 1) * np.log(q))
    geo_f = fmin * a

    # Wavelet decomposition computation
    matxte1 = np.zeros((n_voices, tcol), dtype=complex)
    matxte2 = np.zeros((n_voices, tcol), dtype=complex)
    _, _, _, wt1 = scalogram(s1, time_instants=timestamps, waveparams=nh0,
            fmin=fmin, fmax=fmax, n_voices=n_voices)
    _, _, _, wt2 = scalogram(s2, time_instants=timestamps, waveparams=nh0,
            fmin=fmin, fmax=fmax, n_voices=n_voices)
    for ptr in range(n_voices):
        matxte1[ptr, :] = wt1[ptr, :] * np.sqrt(a[n_voices - ptr - 1])
        matxte2[ptr, :] = wt2[ptr, :] * np.sqrt(a[n_voices - ptr - 1])

    umin = -umax
    u = np.linspace(umin, umax, 2 * mu + 1)
    u = u[:(2 * mu)]
    u[mu] = 0
    p = np.arange(2 * n_voices)
    beta = (p / float(n_voices) - 1.0) / (2 * np.log(q))
    l1 = np.zeros((2 * mu, 2 * n_voices), dtype=complex)
    l2 = np.zeros((2 * mu, 2 * n_voices), dtype=complex)
    for m in range(l1.shape[0]):
        l1[m, :] = np.exp(-2 * np.pi * 1j * beta * np.log(lambdak(u[m], K)))
        l2[m, :] = np.exp(-2 * np.pi * 1j * beta * np.log(lambdak(-u[m], K)))

    # Calculate time smoothing window
    if ng0 == 0:
        G = np.ones((2 * mu))
    else:
        a_t = 3
        sigma_t = ng0 * fmax / np.sqrt(2 * a_t * np.log(10))
        a_u = 2 * np.pi ** 2 * sigma_t ** 2 * umax ** 2 / np.log(10)
        G = np.exp(-(a_u * np.log(10) / mu ** 2) * np.arange(-mu, mu) ** 2)

    waf = np.zeros((2 * mu, n_voices))
    tfr = np.zeros((n_voices, tcol))
    S1 = S2 = np.zeros((2 * n_voices,), dtype=complex)
    mx1 = mx2 = np.zeros((2 * n_voices, 2 * mu))

    for ti in range(tcol):
        S1[:n_voices] = matxte1[:, ti]
        mellin1 = np.fft.fftshift(np.fft.ifft(S1))
        mx1 = l1 * mellin1.reshape(1, mellin1.shape[0]).repeat(2 * mu, 0)
        mx1 = np.fft.fft(mx1, axis=0)
        tx1 = mx1[:n_voices, :].T

        S2[:n_voices] = matxte2[:, ti]
        mellin2 = np.fft.fftshift(np.fft.ifft(S2))
        mx2 = l2 * mellin2.reshape(1, mellin2.shape[0]).repeat(2 * mu, 0)
        mx2 = np.fft.fft(mx2, axis=0)
        tx2 = mx2[:n_voices, :].T
        waf = np.real(tx1 * np.conj(tx2)) * G.reshape(G.shape[0], 1).repeat(n_voices, axis=1)
        tfr[:, ti] = np.sum(waf, axis=0) * geo_f

    t = timestamps
    f = geo_f

    # Normalization
    sp1 = np.fft.fft(hilbert(s1))
    sp2 = np.fft.fft(hilbert(s2))
    indmin = int(1 + np.round(fmin * (xrow - 2)))
    indmax = int(1 + np.round(fmax * (xrow - 2)))
    sp1_ana = sp1[indmin:(indmax + 1)]
    sp2_ana = sp2[indmin:(indmax + 1)]
    xx = np.dot(np.real(sp1_ana), np.real(sp2_ana))
    xx += np.dot(np.imag(sp1_ana), np.imag(sp2_ana))
    tfr = tfr * xx / integrate_2d(tfr, t, f) / n_voices
    return tfr, t, f
Example #14
def wide_band(signal, fmin=None, fmax=None, N=None):
    if 1 in signal.shape:
        signal = signal.ravel()
    elif signal.ndim != 1:
        raise ValueError("The input signal should be one dimensional.")
    s_ana = hilbert(np.real(signal))
    nx = signal.shape[0]
    m = int(np.round(nx / 2.0))
    t = np.arange(nx) - m
    tmin = 0
    tmax = nx - 1
    T = tmax - tmin

    # determine default values for fmin, fmax
    if (fmin is None) or (fmax is None):
        # matplotlib.mlab.find has been removed; boolean indexing via
        # np.nonzero yields the same indices.
        STF = np.fft.fftshift(s_ana)
        sp = np.abs(STF[:m])**2
        maxsp = np.amax(sp)
        f = np.linspace(0, 0.5, m + 1)
        f = f[:m]
        indmin = np.nonzero(sp > maxsp / 100.0)[0].min()
        indmax = np.nonzero(sp > maxsp / 100.0)[0].max()
        if fmin is None:
            fmin = max([0.01, 0.05 * np.fix(f[indmin] / 0.05)])
        if fmax is None:
            fmax = 0.05 * np.ceil(f[indmax] / 0.05)
    B = fmax - fmin
    R = B / ((fmin + fmax) / 2.0)
    nq = np.ceil((B * T * (1 + 2.0 / R) * np.log(
        (1 + R / 2.0) / (1 - R / 2.0))) / 2.0)
    nmin = nq - (nq % 2)
    if N is None:
        N = int(2**(nextpow2(nmin)))

    # geometric sampling for the analyzed spectrum
    k = np.arange(1, N + 1)
    q = (fmax / fmin)**(1.0 / (N - 1))
    geo_f = fmin * (np.exp((k - 1) * np.log(q)))
    tfmatx = -2j * np.dot(t.reshape(-1, 1), geo_f.reshape(1, -1)) * np.pi
    tfmatx = np.exp(tfmatx)
    S = np.dot(s_ana.reshape(1, -1), tfmatx)
    S = np.tile(S, (nx, 1))
    Sb = S * tfmatx

    tau = t
    S = np.c_[S, np.zeros((nx, N))].T
    Sb = np.c_[Sb, np.zeros((nx, N))].T

    # Mellin transform computation of the analyzed signal
    p = np.arange(2 * N)
    coef = np.exp(p / 2.0 * np.log(q))
    mellinS = np.fft.fftshift(np.fft.ifft(S[:, 0] * coef))
    mellinS = np.tile(mellinS, (nx, 1)).T

    mellinSb = np.zeros((2 * N, nx), dtype=complex)
    for i in range(nx):
        mellinSb[:, i] = np.fft.fftshift(np.fft.ifft(Sb[:, i] * coef))

    k = np.arange(1, 2 * N + 1)
    scale = np.logspace(np.log10(fmin / fmax), np.log10(fmax / fmin), N)
    theta = np.log(scale)
    mellinSSb = mellinS * np.conj(mellinSb)

    waf = np.fft.ifft(mellinSSb, N, axis=0)
    no2 = int((N + N % 2) / 2.0)
    waf = np.r_[waf[no2:(N + 1), :], waf[:no2, :]]

    # normalization
    s = np.real(s_ana)
    SP = np.fft.fft(hilbert(s))
    indmin = int(1 + np.round(fmin * (nx - 2)))
    indmax = int(1 + np.round(fmax * (nx - 2)))
    sp_ana = SP[(indmin - 1):indmax]
    waf *= (np.linalg.norm(sp_ana)**2) / waf[no2 - 1, m - 1] / N

    return waf, tau, theta
Example #15
 def _get_nvoices(self):
     nq = np.ceil((self.bw * self.T * (1 + 2.0 / self.R) * np.log((1 + self.R / 2.0) / (1 - self.R / 2.0))) / 2)
     nmin = nq - nq % 2
     ndflt = 2 ** nextpow2(nmin)
     self.n_voices = int(ndflt)