Example #1
    def run(self):
        f = np.logspace(np.log10(self.fmin), np.log10(self.fmax), self.n_voices)
        a = np.logspace(np.log10(self.fmax / float(self.fmin)), np.log10(1), self.n_voices)
        wt = np.zeros((self.n_voices, self.ts.shape[0]), dtype=complex)
        # A scalar check keeps an ndarray waveparams out of the numeric
        # comparison and routes it to the custom-wavelet branch below.
        if np.isscalar(self.waveparams) and self.waveparams > 0:
            for ptr in range(self.n_voices):
                nha = self.waveparams * a[ptr]
                tha = np.arange(-int(np.round(nha)), int(np.round(nha)) + 1)
                x = np.exp(-(2 * np.log(10) / (nha ** 2)) * tha ** 2)
                y = np.exp(1j * 2 * np.pi * f[ptr] * tha)
                ha = x * y
                detail = np.convolve(self.z, ha) / np.sqrt(a[ptr])
                ix = np.arange(int(np.round(nha)), detail.shape[0] - int(np.round(nha)) + 1,
                            dtype=int)
                wt[ptr, :] = detail[self.ts]
                detail = detail[ix]
                self.tfr[ptr, :] = detail[self.ts] * np.conj(detail[self.ts])
        elif np.isscalar(self.waveparams) and self.waveparams == 0:
            for ptr in range(self.n_voices):
                ha = mexhat(f[ptr])
                nha = (ha.shape[0] - 1) / 2
                detail = np.convolve(self.z, ha) / np.sqrt(a[ptr])
                ix = np.arange(int(np.round(nha)) + 1, detail.shape[0] - int(np.round(nha)) + 1)
                detail = detail[ix]
                wt[ptr, :] = detail[self.ts]
                self.tfr[ptr, :] = detail[self.ts] * np.conj(detail[self.ts])
        elif isinstance(self.waveparams, np.ndarray):
            rwav, cwav = self.waveparams.shape
            if cwav > rwav:
                self.waveparams = self.waveparams.T
            wavef = np.fft.fft(self.waveparams, axis=0)
            nwave = self.waveparams.shape[0]
            # Centre frequency of the mother wavelet, taken from the location
            # of its spectral peak (first half of the spectrum).
            wavef_half = np.abs(wavef[:nwave // 2])
            f0 = np.nonzero(wavef_half == wavef_half.max())[0].mean() / nwave
            a = np.logspace(np.log10(f0 / float(self.fmin)), np.log10(f0 / float(self.fmax)), self.n_voices)
            B = 0.99
            R = B / (1.001 / 2)
            nscale = np.max([128, np.round((B * nwave * (1 + 2.0 / R) * np.log((1 +
                R / 2.0) / (1 - R / 2.0))) / 2)])
            wts = scale(self.waveparams, a, self.fmin, self.fmax, nscale)
            for ptr in range(self.n_voices):
                ha = wts[ptr, :]
                nha = ha.shape[0] / 2
                detail = np.convolve(self.z, ha) / np.sqrt(a[ptr])
                detail = detail[int(np.floor(nha)):int(detail.shape[0] - np.round(nha))]
                wt[ptr, :] = detail[self.ts]
                self.tfr[ptr, :] = detail[self.ts] * np.conj(detail[self.ts])

        # Normalization
        SP = np.fft.fft(self.z, axis=0)
        indmin = 1 + int(np.round(self.fmin * (self.signal.shape[0] - 2)))
        indmax = 1 + int(np.round(self.fmax * (self.signal.shape[0] - 2)))
        SPana = SP[indmin:(indmax + 1)]
        self.tfr = np.real(self.tfr)
        self.tfr = self.tfr * (np.linalg.norm(SPana) ** 2) / integrate_2d(self.tfr, self.ts, f) / self.n_voices
        return self.tfr, self.ts, f, wt
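For reference, the waveparams > 0 branch above builds, for each voice, a Morlet-style analyzing wavelet: a Gaussian envelope of half-length nha = waveparams * a[ptr], modulated by a complex exponential at the voice frequency f[ptr]. Below is a minimal standalone sketch of that construction; the names morlet_voice and half_len are illustrative and not part of tftb.

import numpy as np


def morlet_voice(f_voice, half_len):
    """Sketch of the per-voice analyzing wavelet used in the branch above."""
    t = np.arange(-int(np.round(half_len)), int(np.round(half_len)) + 1)
    envelope = np.exp(-(2 * np.log(10) / half_len ** 2) * t ** 2)  # Gaussian window
    carrier = np.exp(2j * np.pi * f_voice * t)  # oscillation at the voice frequency
    return envelope * carrier


# e.g. the wavelet analyzing normalized frequency 0.1 with a half-length of 16 samples
ha = morlet_voice(0.1, 16)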
Example #2
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2015 jaidev <jaidev@newton>
#
# Distributed under terms of the MIT license.

"""

"""


from tftb.generators import mexhat
import matplotlib.pyplot as plt

plt.plot(mexhat())
plt.grid()
plt.title('Mexican Hat Wavelet')
plt.show()
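In the scalogram code elsewhere in this listing, the Mexican hat wavelet is generated per voice as mexhat(f[ptr]), i.e. with a normalized centre frequency. Assuming mexhat accepts such an argument (as those calls suggest), a small sketch comparing a few centre frequencies could look like this:

from tftb.generators import mexhat
import matplotlib.pyplot as plt

# Plot the Mexican hat wavelet at a few normalized centre frequencies.
for nu in (0.05, 0.1, 0.25):
    plt.plot(mexhat(nu), label='nu = %.2f' % nu)
plt.legend()
plt.grid()
plt.title('Mexican Hat Wavelet at several centre frequencies')
plt.show()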
Example #3
import numpy as np
from scipy.signal import hilbert
from tftb.generators import mexhat
# ``scale`` and ``integrate_2d`` are helper routines assumed to be available
# from the surrounding module.


def scalogram(signal,
              fmin=None,
              fmax=None,
              n_voices=None,
              time_instants=None,
              waveparams=None):
    """scalogram

    :param signal:
    :param fmin:
    :param fmax:
    :param n_voices:
    :param time_instants:
    :param waveparams:
    :type signal:
    :type fmin:
    :type fmax:
    :type n_voices:
    :type time_instants:
    :type waveparams:
:return:
:rtype:
    """
    # FIXME: Output from the MATLAB implementation differs significantly.
    if time_instants is None:
        time_instants = np.arange(signal.shape[0])
    if waveparams is None:
        waveparams = np.sqrt(signal.shape[0])
    if n_voices is None:
        n_voices = signal.shape[0]

    s_centered = np.real(signal) - np.real(signal).mean()
    z = hilbert(s_centered)

    if (fmin is None) or (fmax is None):
        stf = np.fft.fft(
            np.fft.fftshift(z[time_instants.min():time_instants.max() + 1]))
        nstf = stf.shape[0]
        sp = np.abs(stf[:int(np.round(nstf / 2.0))])**2
        maxsp = sp.max()
        f = np.linspace(0, 0.5, int(np.round(nstf / 2.0)) + 1)
        if fmin is None:
            mask = sp > maxsp / 100.0
            indmin = np.arange(mask.shape[0],
                               dtype=int)[mask.astype(bool)].min()
            fmin = max([0.01, 0.05 * np.floor(f[indmin] / 0.05)])
        if fmax is None:
            mask = sp > maxsp / 100.0
            indmax = np.arange(mask.shape[0],
                               dtype=int)[mask.astype(bool)].max()
            fmax = 0.05 * np.ceil(f[indmax] / 0.05)

    f = np.logspace(np.log10(fmin), np.log10(fmax), n_voices)
    a = np.logspace(np.log10(fmax / float(fmin)), np.log10(1), n_voices)
    wt = np.zeros((n_voices, time_instants.shape[0]), dtype=complex)
    tfr = np.zeros((n_voices, time_instants.shape[0]), dtype=complex)

    # A scalar check keeps an ndarray waveparams out of the numeric comparison
    # and routes it to the custom-wavelet branch below.
    if np.isscalar(waveparams) and waveparams > 0:
        for ptr in range(n_voices):
            nha = waveparams * a[ptr]
            tha = np.arange(-np.round(nha), np.round(nha) + 1)
            x = np.exp(-(2 * np.log(10) / (nha**2)) * tha**2)
            y = np.exp(1j * 2 * np.pi * f[ptr] * tha)
            ha = x * y
            detail = np.convolve(z, ha) / np.sqrt(a[ptr])
            ix = np.arange(round(nha),
                           detail.shape[0] - np.round(nha) + 1,
                           dtype=int)
            wt[ptr, :] = detail[time_instants]
            detail = detail[ix]
            tfr[ptr, :] = detail[time_instants] * np.conj(
                detail[time_instants])
    elif np.isscalar(waveparams) and waveparams == 0:
        for ptr in range(n_voices):
            ha = mexhat(f[ptr])
            nha = (ha.shape[0] - 1) / 2
            detail = np.convolve(z, ha) / np.sqrt(a[ptr])
            ix = np.arange(round(nha) + 1, detail.shape[0] - np.round(nha) + 1,
                           dtype=int)
            detail = detail[ix]
            wt[ptr, :] = detail[time_instants]
            tfr[ptr, :] = detail[time_instants] * np.conj(
                detail[time_instants])
    elif isinstance(waveparams, np.ndarray):
        rwav, cwav = waveparams.shape
        if cwav > rwav:
            waveparams = waveparams.T
        wavef = np.fft.fft(waveparams, axis=0)
        nwave = waveparams.shape[0]
        # Centre frequency of the mother wavelet, taken from the location of
        # its spectral peak (first half of the spectrum).
        wavef_half = np.abs(wavef[:nwave // 2])
        f0 = np.nonzero(wavef_half == wavef_half.max())[0].mean() / nwave
        a = np.logspace(np.log10(f0 / float(fmin)), np.log10(f0 / float(fmax)),
                        n_voices)
        B = 0.99
        R = B / (1.001 / 2)
        nscale = np.max([
            128,
            np.round((B * nwave * (1 + 2.0 / R) * np.log(
                (1 + R / 2.0) / (1 - R / 2.0))) / 2)
        ])
        wts = scale(waveparams, a, fmin, fmax, nscale)
        for ptr in range(n_voices):
            ha = wts[ptr, :]
            nha = ha.shape[0] / 2
            detail = np.convolve(z, ha) / np.sqrt(a[ptr])
            detail = detail[int(np.floor(nha)):int(detail.shape[0] -
                                                   np.round(nha))]
            wt[ptr, :] = detail[time_instants]
            tfr[ptr, :] = detail[time_instants] * np.conj(
                detail[time_instants])

    t = time_instants
    f = f.T
    # Normalization
    SP = np.fft.fft(z, axis=0)
    indmin = 1 + int(np.round(fmin * (signal.shape[0] - 2)))
    indmax = 1 + int(np.round(fmax * (signal.shape[0] - 2)))
    SPana = SP[indmin:(indmax + 1)]
    tfr = np.real(tfr)
    tfr = tfr * (np.linalg.norm(SPana)**2) / integrate_2d(tfr, t, f) / n_voices
    return tfr, t, f, wt
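A minimal usage sketch for the function above, using an illustrative test signal (a 128-sample linear chirp); the parameter values and plotting calls are only an example:

import numpy as np
import matplotlib.pyplot as plt

# Illustrative test signal: a linear chirp sweeping from 0.05 to 0.4 in
# normalized frequency.
n = 128
inst_freq = np.linspace(0.05, 0.4, n)
sig = np.cos(2 * np.pi * np.cumsum(inst_freq))

tfr, ts, freqs, wt = scalogram(sig, fmin=0.05, fmax=0.45, n_voices=64)

plt.contourf(ts, freqs, tfr)
plt.xlabel('Time')
plt.ylabel('Normalized frequency')
plt.title('Scalogram of a linear chirp')
plt.show()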
Example #4
import numpy as np
from scipy.signal import hilbert
from tftb.generators import mexhat
# ``scale`` and ``integrate_2d`` are helper routines assumed to be available
# from the surrounding module.


def scalogram(signal, fmin=None, fmax=None, n_voices=None, time_instants=None,
              waveparams=None):
    """scalogram

    :param signal:
    :param fmin:
    :param fmax:
    :param n_voices:
    :param time_instants:
    :param waveparams:
    :type signal:
    :type fmin:
    :type fmax:
    :type n_voices:
    :type time_instants:
    :type waveparams:
    :return:
    :rtype:
    """
    # FIXME: Output from the MATLAB implementation differs significantly.
    if time_instants is None:
        time_instants = np.arange(signal.shape[0])
    if waveparams is None:
        waveparams = np.sqrt(signal.shape[0])
    if n_voices is None:
        n_voices = signal.shape[0]

    s_centered = np.real(signal) - np.real(signal).mean()
    z = hilbert(s_centered)

    if (fmin is None) or (fmax is None):
        stf = np.fft.fft(np.fft.fftshift(z[time_instants.min():time_instants.max() + 1]))
        nstf = stf.shape[0]
        sp = np.abs(stf[:int(np.round(nstf / 2.0))]) ** 2
        maxsp = sp.max()
        f = np.linspace(0, 0.5, int(np.round(nstf / 2.0)) + 1)
        if fmin is None:
            mask = sp > maxsp / 100.0
            indmin = np.arange(mask.shape[0], dtype=int)[mask.astype(bool)].min()
            fmin = max([0.01, 0.05 * np.floor(f[indmin] / 0.05)])
        if fmax is None:
            mask = sp > maxsp / 100.0
            indmax = np.arange(mask.shape[0], dtype=int)[mask.astype(bool)].max()
            fmax = 0.05 * np.ceil(f[indmax] / 0.05)

    f = np.logspace(np.log10(fmin), np.log10(fmax), n_voices)
    a = np.logspace(np.log10(fmax / float(fmin)), np.log10(1), n_voices)
    wt = np.zeros((n_voices, time_instants.shape[0]), dtype=complex)
    tfr = np.zeros((n_voices, time_instants.shape[0]), dtype=complex)

    if np.isscalar(waveparams) and waveparams > 0:
        for ptr in range(n_voices):
            nha = waveparams * a[ptr]
            tha = np.arange(-np.round(nha), np.round(nha) + 1)
            x = np.exp(-(2 * np.log(10) / (nha ** 2)) * tha ** 2)
            y = np.exp(1j * 2 * np.pi * f[ptr] * tha)
            ha = x * y
            detail = np.convolve(z, ha) / np.sqrt(a[ptr])
            ix = np.arange(round(nha), detail.shape[0] - np.round(nha) + 1,
                           dtype=int)
            wt[ptr, :] = detail[time_instants]
            detail = detail[ix]
            tfr[ptr, :] = detail[time_instants] * np.conj(detail[time_instants])
    elif np.isscalar(waveparams) and waveparams == 0:
        for ptr in range(n_voices):
            ha = mexhat(f[ptr])
            nha = (ha.shape[0] - 1) / 2
            detail = np.convolve(z, ha) / np.sqrt(a[ptr])
            ix = np.arange(round(nha) + 1, detail.shape[0] - np.round(nha) + 1,
                           dtype=int)
            detail = detail[ix]
            wt[ptr, :] = detail[time_instants]
            tfr[ptr, :] = detail[time_instants] * np.conj(detail[time_instants])
    elif isinstance(waveparams, np.ndarray):
        rwav, cwav = waveparams.shape
        if cwav > rwav:
            waveparams = waveparams.T
        wavef = np.fft.fft(waveparams, axis=0)
        nwave = waveparams.shape[0]
        wavef_half = np.abs(wavef[:nwave // 2])
        f0 = np.nonzero(wavef_half == wavef_half.max())[0].mean() / nwave
        a = np.logspace(np.log10(f0 / float(fmin)), np.log10(f0 / float(fmax)), n_voices)
        B = 0.99
        R = B / (1.001 / 2)
        nscale = np.max([128, np.round((B * nwave * (1 + 2.0 / R) * np.log((1 +
            R / 2.0) / (1 - R / 2.0))) / 2)])
        wts = scale(waveparams, a, fmin, fmax, nscale)
        for ptr in range(n_voices):
            ha = wts[ptr, :]
            nha = ha.shape[0] / 2
            detail = np.convolve(z, ha) / np.sqrt(a[ptr])
            detail = detail[int(np.floor(nha)):int(detail.shape[0] - np.round(nha))]
            wt[ptr, :] = detail[time_instants]
            tfr[ptr, :] = detail[time_instants] * np.conj(detail[time_instants])

    t = time_instants
    f = f.T
    # Normalization
    SP = np.fft.fft(z, axis=0)
    indmin = 1 + int(np.round(fmin * (signal.shape[0] - 2)))
    indmax = 1 + int(np.round(fmax * (signal.shape[0] - 2)))
    SPana = SP[indmin:(indmax + 1)]
    tfr = np.real(tfr)
    tfr = tfr * (np.linalg.norm(SPana) ** 2) / integrate_2d(tfr, t, f) / n_voices
    return tfr, t, f, wt
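The final normalization divides by a two-dimensional integral of the distribution over the time-frequency grid, computed by the helper integrate_2d. Its implementation is not shown in this listing; purely as a hypothetical stand-in to make the normalization step concrete, a trapezoidal rule applied in both directions would look like this (not necessarily how tftb implements it):

import numpy as np


def integrate_2d_trapz(mat, x, y):
    """Hypothetical stand-in for integrate_2d: integrate mat (len(y) rows,
    len(x) columns) over x with the trapezoidal rule, then over y."""
    return np.trapz(np.trapz(mat, x=x, axis=1), x=y)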
Example #5
    def run(self):
        f = np.logspace(np.log10(self.fmin), np.log10(self.fmax),
                        self.n_voices)
        a = np.logspace(np.log10(self.fmax / float(self.fmin)), np.log10(1),
                        self.n_voices)
        wt = np.zeros((self.n_voices, self.ts.shape[0]), dtype=complex)
        if np.isscalar(self.waveparams) and self.waveparams > 0:
            for ptr in range(self.n_voices):
                nha = self.waveparams * a[ptr]
                tha = np.arange(-int(np.round(nha)), int(np.round(nha)) + 1)
                x = np.exp(-(2 * np.log(10) / (nha**2)) * tha**2)
                y = np.exp(1j * 2 * np.pi * f[ptr] * tha)
                ha = x * y
                detail = np.convolve(self.z, ha) / np.sqrt(a[ptr])
                ix = np.arange(int(np.round(nha)),
                               detail.shape[0] - int(np.round(nha)) + 1,
                               dtype=int)
                wt[ptr, :] = detail[self.ts]
                detail = detail[ix]
                self.tfr[ptr, :] = detail[self.ts] * np.conj(detail[self.ts])
        elif np.isscalar(self.waveparams) and self.waveparams == 0:
            for ptr in range(self.n_voices):
                ha = mexhat(f[ptr])
                nha = (ha.shape[0] - 1) / 2
                detail = np.convolve(self.z, ha) / np.sqrt(a[ptr])
                ix = np.arange(
                    int(np.round(nha)) + 1,
                    detail.shape[0] - int(np.round(nha)) + 1)
                detail = detail[ix]
                wt[ptr, :] = detail[self.ts]
                self.tfr[ptr, :] = detail[self.ts] * np.conj(detail[self.ts])
        elif isinstance(self.waveparams, np.ndarray):
            rwav, cwav = self.waveparams.shape
            if cwav > rwav:
                self.waveparams = self.waveparams.T
            wavef = np.fft.fft(self.waveparams, axis=0)
            nwave = self.waveparams.shape[0]
            wavef_half = np.abs(wavef[:nwave // 2])
            f0 = np.nonzero(wavef_half == wavef_half.max())[0].mean() / nwave
            a = np.logspace(np.log10(f0 / float(self.fmin)),
                            np.log10(f0 / float(self.fmax)), self.n_voices)
            B = 0.99
            R = B / (1.001 / 2)
            nscale = np.max([
                128,
                np.round((B * nwave * (1 + 2.0 / R) * np.log(
                    (1 + R / 2.0) / (1 - R / 2.0))) / 2)
            ])
            wts = scale(self.waveparams, a, self.fmin, self.fmax, nscale)
            for ptr in range(self.n_voices):
                ha = wts[ptr, :]
                nha = ha.shape[0] / 2
                detail = np.convolve(self.z, ha) / np.sqrt(a[ptr])
                detail = detail[int(np.floor(nha)):int(detail.shape[0] -
                                                       np.round(nha))]
                wt[ptr, :] = detail[self.ts]
                self.tfr[ptr, :] = detail[self.ts] * np.conj(detail[self.ts])

        # Normalization
        SP = np.fft.fft(self.z, axis=0)
        indmin = 1 + int(np.round(self.fmin * (self.signal.shape[0] - 2)))
        indmax = 1 + int(np.round(self.fmax * (self.signal.shape[0] - 2)))
        SPana = SP[indmin:(indmax + 1)]
        self.tfr = np.real(self.tfr)
        self.tfr = self.tfr * (np.linalg.norm(SPana)**2) / integrate_2d(
            self.tfr, self.ts, f) / self.n_voices
        return self.tfr, self.ts, f, wt