Code example #1
def get_filter_banks(filters_num,
                     NFFT,
                     samplerate,
                     low_freq=0,
                     high_freq=None):
    ''' Mel filter bank
    filters_num: number of filters
    NFFT: number of FFT points
    samplerate: sample rate in Hz
    low_freq: lowest frequency covered by the mel filter bank
    high_freq: highest frequency covered by the mel filter bank
    '''
    # convert the Hz limits to the mel scale
    low_mel = hz2mel(low_freq)
    high_mel = hz2mel(high_freq)
    # place the filter edge/centre points evenly on the mel scale
    mel_points = np.linspace(low_mel, high_mel, filters_num + 2)
    # convert those points back to Hz
    hz_points = mel2hz(mel_points)
    # triangular mel filter bank design: map the Hz points to FFT bin indices
    center = np.floor((NFFT + 1) * hz_points / samplerate)
    fbank = np.zeros([filters_num, int(NFFT / 2 + 1)])

    for i in range(0, filters_num):
        start = int(center[i])
        end = int(center[i + 2])
        tri_wid = end - start
        tri_fil = sg.bartlett(tri_wid, sym=False)
        fbank[i][start:end] = tri_fil

    return fbank
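A minimal usage sketch (not part of the original snippet): it assumes np and sg are the numpy and scipy.signal aliases used above, and that hz2mel/mel2hz follow the common 2595*log10(1 + f/700) mel convention, which the snippet does not show.

import numpy as np

def hz2mel(hz):
    return 2595.0 * np.log10(1.0 + np.asarray(hz, dtype=float) / 700.0)

def mel2hz(mel):
    return 700.0 * (10.0 ** (np.asarray(mel, dtype=float) / 2595.0) - 1.0)

samplerate, NFFT = 16000, 512
fbank = get_filter_banks(26, NFFT, samplerate, low_freq=0, high_freq=samplerate // 2)
# fbank.shape == (26, NFFT // 2 + 1); multiplying it with a power spectrum of
# matching length yields the 26 mel filter-bank energies.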
Code example #2
File: tp3.py Project: Rcastagnola/PDS
def Bartlett (N,x):
    
    ventana = sig.bartlett(N)
    
    salida = np.multiply(x,ventana)

    return salida
Code example #3
File: analysis.py Project: MaxLikelihood/PyDSP
    def __window_data(data):
        # Apply window function to the decoded data & store as new key:value pair in dictionary
        # Parameters: data: [{'frame_data': string,
        #                     'frame_count': int,
        #                     'frame_time': float,
        #                     'frame_position': int,
        #                     'frame_decoded': numpy.ndarray}, ...]

        # cache window function
        if 'hann' == config_analysis.frame_window:
            window = signal.hann(config_audio.frames_per_buffer)
        elif 'hamming' == config_analysis.frame_window:
            window = signal.hamming(config_audio.frames_per_buffer)
        elif 'blackman' == config_analysis.frame_window:
            window = signal.blackman(config_audio.frames_per_buffer)
        elif 'bartlett' == config_analysis.frame_window:
            window = signal.bartlett(config_audio.frames_per_buffer)
        elif 'barthann' == config_analysis.frame_window:
            window = signal.barthann(config_audio.frames_per_buffer)
        else:
            # window function unavailable
            return

        # apply specified window function in config
        for i in range(len(data)):
            data[i]['frame_windowed'] = data[i]['frame_decoded'][:] * window
Code example #4
File: plotting.py Project: zhangwise/apasvo
def plot_specgram(ax, data, fs, nfft=256, noverlap=128, window='hann',
                  cmap='jet', interpolation='bilinear', rasterized=True):

    if window not in SPECGRAM_WINDOWS:
        raise ValueError("Window not supported")

    elif window == "boxcar":
        mwindow = signal.boxcar(nfft)
    elif window == "hamming":
        mwindow = signal.hamming(nfft)
    elif window == "hann":
        mwindow = signal.hann(nfft)
    elif window == "bartlett":
        mwindow = signal.bartlett(nfft)
    elif window == "blackman":
        mwindow = signal.blackman(nfft)
    elif window == "blackmanharris":
        mwindow = signal.blackmanharris(nfft)

    specgram, freqs, time = mlab.specgram(data, NFFT=nfft, Fs=fs,
                                          window=mwindow,
                                          noverlap=noverlap)
    specgram = 10 * np.log10(specgram[1:, :])
    specgram = np.flipud(specgram)

    freqs = freqs[1:]
    halfbin_time = (time[1] - time[0]) / 2.0
    halfbin_freq = (freqs[1] - freqs[0]) / 2.0
    extent = (time[0] - halfbin_time, time[-1] + halfbin_time,
              freqs[0] - halfbin_freq, freqs[-1] + halfbin_freq)

    ax.imshow(specgram, cmap=cmap, interpolation=interpolation,
                            extent=extent, rasterized=rasterized)
    ax.axis('tight')
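A hypothetical usage sketch for plot_specgram (not from the apasvo project): SPECGRAM_WINDOWS is assumed to be the module-level list of supported window names, and signal/mlab/np are assumed to be imported as in the snippet.

import numpy as np
import matplotlib.pyplot as plt
from scipy import signal

fs = 1000.0
t = np.arange(0, 10, 1 / fs)
data = signal.chirp(t, f0=10, t1=10, f1=400)   # test sweep from 10 Hz to 400 Hz

fig, ax = plt.subplots()
plot_specgram(ax, data, fs, nfft=256, noverlap=128, window='hann')
plt.show()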
Code example #5
def win_sel(win_str, win_size):
    """
        Function returns a window vector based on window name.
        Note class can only use windows found in scipy.signal library.
    """
    overlap = 0
    if (win_str == 'blackmanharris'):
        win = sig.blackmanharris(win_size)
        overlap = .75
    elif (win_str == 'blackman'):
        win = sig.blackman(win_size)
    elif (win_str == 'bartlett'):
        win = sig.bartlett(win_size)
    elif (win_str == 'hamming'):
        win = sig.hamming(win_size)
    elif (win_str == 'hanning'):
        win = sig.hanning(win_size)
    elif (win_str == 'hann'):
        win = sig.hann(win_size)
    elif (win_str == 'barthann'):
        win = sig.barthann(win_size)
    elif (win_str == 'triang'):
        win = sig.triang(win_size)
    elif (win_str == 'rect' or win_str == None):
        win = np.ones(win_size)
    else:
        print('Invalid Window Defined')
        return -1
    return win, overlap
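A hypothetical usage sketch for win_sel (sig and np are assumed to be the scipy.signal and numpy aliases used above); the returned window and overlap fraction could feed, for example, into Welch-style averaging:

import numpy as np
from scipy import signal as sig

fs = 1000.0
t = np.arange(0, 1.0, 1 / fs)
x = np.sin(2 * np.pi * 100 * t) + 0.1 * np.random.randn(t.size)

win, overlap = win_sel('blackmanharris', 256)
f, pxx = sig.welch(x, fs=fs, window=win, noverlap=int(overlap * len(win)))
# pxx should show a peak near 100 Hz.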
Code example #6
def td_dft(dat, winlen=4096, winoverlap=2048, dftsize=4096, winty='blackman'):
    # calculate the views
    views = view_as_windows(dat,winlen,winlen-winoverlap)

    # generate desired window
    if winty == 'rect':
        win = np.ones(winlen)
    elif winty == 'bartlett':
        win = bartlett(winlen)
    elif winty == 'hann':
        win = hanning(winlen)
    elif winty == 'hamming':
        win = hamming(winlen)
    elif winty == 'blackman':
        win = blackman(winlen)
    else:
        raise ValueError('invalid window type: %s' % winty)


    # apply window sequence to views
    views = [ v*win for v in views ]

    # apply time aliasing to the input sequences if needed
    if winlen > dftsize:
        views = [ time_alias(v,dftsize) for v in views ]

    # apply fft and fftshift to all views
    dfts = [ fftshift(fft(v,dftsize)) for v in views ]

    return np.array(dfts)
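A hedged usage sketch for td_dft: view_as_windows is assumed to come from skimage.util, and the window and FFT helpers (bartlett, hanning, blackman, fft, fftshift, ...) from scipy, as the snippet's missing imports suggest.

import numpy as np

fs = 48000.0
t = np.arange(0, 1.0, 1 / fs)
dat = np.sin(2 * np.pi * 1000 * t)

dfts = td_dft(dat, winlen=4096, winoverlap=2048, dftsize=4096, winty='blackman')
# one fftshift-ed 4096-point spectrum per windowed view; inspect with np.abs(dfts)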
Code example #7
def spectrum_wwind(array, time, window='hanning'):  # time should be in seconds
    # Size of array
    Nw = array.shape[0]

    # Calculate time step (assumed to be in seconds)
    dt = time[1] - time[0]

    # prefactor
    # print 'dt = ',dt
    prefactor = dt

    # Calculate array of frequencies, shift
    w = np.fft.fftfreq(Nw, dt)
    w0 = np.fft.fftshift(w)

    # make window
    # blackman window
    if window == 'blackman':
        bwin = blackman(Nw)  # pretty good
    if window == 'hanning':
        bwin = hanning(Nw)  # pretty good
    if window == 'hamming':
        bwin = hamming(Nw)  # not as good
    if window == 'bartlett':
        bwin = bartlett(Nw)  # pretty good
    if window == 'kaiser':
        bwin = kaiser(Nw, 6)
    if window == 'None':
        bwin = 1.0

    # Calculate FFT
    aw = prefactor * np.fft.fft(array * bwin)
    aw0 = np.fft.fftshift(aw)

    # Calculate phase
    phase = np.angle(aw)
    phase0 = np.fft.fftshift(phase)

    # Adjust arrays if Nw is even
    if not np.mod(Nw, 2):
        w0 = np.append(w0, -w0[0])
        aw0 = np.append(aw0, -aw0[0])
        phase0 = np.append(phase0, -phase0[0])

    # Cut FFTs in half
    Nwi = Nw // 2
    w2 = w0[Nwi:]
    aw2 = aw0[Nwi:]
    phase2 = phase0[Nwi:]

    comp = aw
    pwr = (np.abs(aw2))**2
    pwr2 = (np.abs(aw))**2
    mag = np.sqrt(pwr)
    cos_phase = np.cos(phase2)
    freq = w2
    freq2 = w

    return freq, freq2, comp, pwr, mag, phase2, cos_phase, dt
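A hypothetical usage sketch for spectrum_wwind (the window helpers such as hanning/blackman are assumed to be imported from scipy.signal in the original module):

import numpy as np

fs = 1000.0
time = np.arange(0, 2.0, 1 / fs)            # seconds
array = np.sin(2 * np.pi * 50 * time)

freq, freq2, comp, pwr, mag, phase2, cos_phase, dt = spectrum_wwind(array, time, window='hanning')
# pwr holds the one-sided power spectrum; its peak should sit near 50 Hz.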
Code example #8
def filtering_bartlett(sig, win_size):
    # Smooth the input with a normalised Bartlett (triangular) moving average.
    win = signal.bartlett(win_size)
    filtered = signal.convolve(sig, win, mode='same') / win.sum()
    return filtered
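A minimal usage sketch, assuming signal in filtering_bartlett refers to scipy.signal:

import numpy as np

rng = np.random.default_rng(0)
t = np.linspace(0, 1, 500)
noisy = np.sin(2 * np.pi * 3 * t) + 0.3 * rng.standard_normal(t.size)

smooth = filtering_bartlett(noisy, win_size=21)
# smooth is the noisy sine averaged with a normalised 21-point Bartlett kernel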
Code example #9
    def recompute_window(self):
        zoom = int(self.width * (self.get_eff_sample_rate()) / self.zoom_fac)

        if self.filter == 'kaiser':
            self.window = signal.kaiser(
                freqshow.SDR_SAMPLE_SIZE,
                self.kaiser_beta,
                False,
            )[0:zoom +
              2]  # for every bin there is a window the same exact size as the read samples.
        elif self.filter == 'boxcar':
            self.window = signal.boxcar(
                freqshow.SDR_SAMPLE_SIZE,
                False,
            )[0:zoom + 2]
        elif self.filter == 'hann':
            self.window = signal.hann(
                freqshow.SDR_SAMPLE_SIZE,
                False,
            )[0:zoom + 2]
        elif self.filter == 'hamming':
            self.window = signal.hamming(
                freqshow.SDR_SAMPLE_SIZE,
                False,
            )[0:zoom + 2]
        elif self.filter == 'blackman':
            self.window = signal.blackman(
                freqshow.SDR_SAMPLE_SIZE,
                False,
            )[0:zoom + 2]
        elif self.filter == 'blackmanharris':
            self.window = signal.blackmanharris(
                freqshow.SDR_SAMPLE_SIZE,
                False,
            )[0:zoom + 2]
        elif self.filter == 'bartlett':
            self.window = signal.bartlett(
                freqshow.SDR_SAMPLE_SIZE,
                False,
            )[0:zoom + 2]
        elif self.filter == 'barthann':
            self.window = signal.barthann(
                freqshow.SDR_SAMPLE_SIZE,
                False,
            )[0:zoom + 2]
        elif self.filter == 'nuttall':
            self.window = signal.nuttall(
                freqshow.SDR_SAMPLE_SIZE,
                False,
            )[0:zoom + 2]
        else:
            self.window = 1
Code example #10
File: FourieTransform.py Project: R-Imai/schoolWork
def mkWindow(com,N):
    if com == "hm":
        win = sg.hamming(N)
    elif com == "hn":
        win = sg.hann(N)
    elif com == "bk":
        win = sg.blackman(N)
    elif com == "ga":
        win = sg.gaussian(N,N/16)
    elif com == "bar":
        win = sg.bartlett(N)
    elif com == "rect":
        win = np.ones(N)
    else :
        usage()
    return win
Code example #11
def plot_specgram(ax,
                  data,
                  fs,
                  nfft=256,
                  noverlap=128,
                  window='hann',
                  cmap='jet',
                  interpolation='bilinear',
                  rasterized=True):

    if window not in SPECGRAM_WINDOWS:
        raise ValueError("Window not supported")

    elif window == "boxcar":
        mwindow = signal.boxcar(nfft)
    elif window == "hamming":
        mwindow = signal.hamming(nfft)
    elif window == "hann":
        mwindow = signal.hann(nfft)
    elif window == "bartlett":
        mwindow = signal.bartlett(nfft)
    elif window == "blackman":
        mwindow = signal.blackman(nfft)
    elif window == "blackmanharris":
        mwindow = signal.blackmanharris(nfft)

    specgram, freqs, time = mlab.specgram(data,
                                          NFFT=nfft,
                                          Fs=fs,
                                          window=mwindow,
                                          noverlap=noverlap)
    specgram = 10 * np.log10(specgram[1:, :])
    specgram = np.flipud(specgram)

    freqs = freqs[1:]
    halfbin_time = (time[1] - time[0]) / 2.0
    halfbin_freq = (freqs[1] - freqs[0]) / 2.0
    extent = (time[0] - halfbin_time, time[-1] + halfbin_time,
              freqs[0] - halfbin_freq, freqs[-1] + halfbin_freq)

    ax.imshow(specgram,
              cmap=cmap,
              interpolation=interpolation,
              extent=extent,
              rasterized=rasterized)
    ax.axis('tight')
Code example #12
#    plt.plot(T,Lt+Zt,color=crta[j],alpha = 0.95,label=r'total $\Delta t=$'+str(dt))#+'{:.{}f}'.format(SHfi, 3 ))
#    plt.xscale('log')
plt.yscale('log')
plt.ylabel('PSD($\omega$)', fontsize=16)
plt.xlabel(r'$\omega$', fontsize=16)
plt.xlim([0, len(SIG) / 2])
#    plt.ylim([0,250])
#    plt.title('Population decay for different time steps and sample sizes')  # +'(N='+str(N)+',M='+str(M)+')')
plt.legend(loc=0)

###############################################################################
################  window functions and their responses  ######################
###############################################################################

windowGauss = signal.gaussian(51, std=6)
windowBartt = signal.bartlett(51)
windowHann = signal.hann(51)
windowCH = signal.chebwin(51, at=100)
windowTuR = signal.tukey(51)

AGauss = np.fft.fft(windowGauss, 2048) / (len(windowGauss) / 2.0)
ABartt = np.fft.fft(windowBartt, 2048) / (len(windowBartt) / 2.0)
AHann = np.fft.fft(windowHann, 2048) / (len(windowHann) / 2.0)
ACH = np.fft.fft(windowCH, 2048) / (len(windowCH) / 2.0)
ATuR = np.fft.fft(windowTuR, 2048) / (len(windowTuR) / 2.0)

freqGauss = np.linspace(-0.5, 0.5, len(AGauss))
freqBartt = np.linspace(-0.5, 0.5, len(ABartt))
freqHann = np.linspace(-0.5, 0.5, len(AHann))
freqCH = np.linspace(-0.5, 0.5, len(ACH))
freqTuR = np.linspace(-0.5, 0.5, len(ATuR))
Code example #13
example demonstrates the spectral leakage for several different windows
(including the boxcar):
"""

fig02 = plt.figure()

# Boxcar with zeroed out fraction
b = sig.boxcar(npts)
zfrac = 0.15
zi = int(npts * zfrac)
b[:zi] = b[-zi:] = 0
name = "Boxcar - zero fraction=%.2f" % zfrac
winspect(b, fig02, name)

winspect(sig.hanning(npts), fig02, "Hanning")
winspect(sig.bartlett(npts), fig02, "Bartlett")
winspect(sig.barthann(npts), fig02, "Modified Bartlett-Hann")

"""

.. image:: fig/multi_taper_spectral_estimation_02.png

As before, the figure on the left displays the windowing function in the
temporal domain and the figure on the right displays the attenuation of
spectral leakage in the other frequency bands of the spectrum. Notice that
although the windowing functions have different spectral attenuation profiles,
trading off attenuation of leakage from frequency bands near the frequency of
interest (narrow-band leakage) against leakage from faraway frequency bands
(broad-band leakage), they are all superior in both respects to the boxcar
window used in the naive periodogram.
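A short sketch (not part of the original tutorial) that makes the trade-off concrete by estimating the peak sidelobe level of a few windows from their zero-padded FFTs:

import numpy as np
import scipy.signal as sig
from scipy.fftpack import fft, fftshift

npts, nfft = 128, 4096
for name, win in [('boxcar', sig.boxcar(npts)),
                  ('bartlett', sig.bartlett(npts)),
                  ('hann', sig.hann(npts))]:
    W = np.abs(fftshift(fft(win, nfft)))
    W_db = 20 * np.log10(W / W.max() + 1e-12)
    half = W_db[nfft // 2:]                    # response from the main-lobe peak outwards
    first_null = np.argmax(np.diff(half) > 0)  # first index where the response turns back up
    print(name, 'peak sidelobe ~ %.1f dB' % half[first_null:].max())
# boxcar sits near -13 dB, bartlett near -27 dB and hann near -31 dB: better
# sidelobe (broad-band) suppression comes with a wider main lobe.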
Code example #14
# Plot the window and its frequency response:

import numpy as np
from scipy import signal
from scipy.fftpack import fft, fftshift
import matplotlib.pyplot as plt

window = signal.bartlett(51)
plt.plot(window)
plt.title("Bartlett window")
plt.ylabel("Amplitude")
plt.xlabel("Sample")

plt.figure()
A = fft(window, 2048) / (len(window)/2.0)
freq = np.linspace(-0.5, 0.5, len(A))
response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
plt.plot(freq, response)
plt.axis([-0.5, 0.5, -120, 0])
plt.title("Frequency response of the Bartlett window")
plt.ylabel("Normalized magnitude [dB]")
plt.xlabel("Normalized frequency [cycles per sample]")
Code example #15
 def test_basic(self):
     assert_allclose(signal.bartlett(6), [0, 0.4, 0.8, 0.8, 0.4, 0])
     assert_allclose(signal.bartlett(7), [0, 1/3, 2/3, 1.0, 2/3, 1/3, 0])
     assert_allclose(signal.bartlett(6, False),
                     [0, 1/3, 2/3, 1.0, 2/3, 1/3])
Code example #16
File: test_windows.py Project: chris-b1/scipy
 def test_basic(self):
     assert_allclose(signal.bartlett(6), [0, 0.4, 0.8, 0.8, 0.4, 0])
     assert_allclose(signal.bartlett(7), [0, 1/3, 2/3, 1.0, 2/3, 1/3, 0])
     assert_allclose(signal.bartlett(6, False),
                     [0, 1/3, 2/3, 1.0, 2/3, 1/3])
Code example #17
from scipy import signal
from matplotlib import pyplot as plt
from matplotlib import style
import mysignals as sigs
import numpy as np
from scipy.fftpack import fft, fftshift

window = signal.bartlett(51)
plt.plot(window)
plt.title("Bartlett Window")
plt.ylabel("Amplitude")
plt.xlabel("Sample")
plt.show()

#frequency response

plt.figure()

A = fft(window, 2048) / (len(window) / 2.0)
freq = np.linspace(-0.5, 0.5, len(A))
response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
plt.plot(freq, response)
plt.axis([-0.5, 0.5, -120, 0])
plt.title("Frequency Response of Bartlett Window")
plt.ylabel("Normalized Magnitude(dB)")
plt.xlabel("Normalized Frequency in cycles/sample")
plt.show()
Code example #18
(including the boxcar):

"""

fig02 = plt.figure()

# Boxcar with zeroed out fraction
b = sig.boxcar(npts)
zfrac = 0.15
zi = int(npts * zfrac)
b[:zi] = b[-zi:] = 0
name = 'Boxcar - zero fraction=%.2f' % zfrac
winspect(b, fig02, name)

winspect(sig.hanning(npts), fig02, 'Hanning')
winspect(sig.bartlett(npts), fig02, 'Bartlett')
winspect(sig.barthann(npts), fig02, 'Modified Bartlett-Hann')
"""

.. image:: fig/multi_taper_spectral_estimation_02.png

As before, the figure on the left displays the windowing function in the
temporal domain and the figure on the right displays the attenuation of
spectral leakage in the other frequency bands of the spectrum. Notice that
although the windowing functions have different spectral attenuation profiles,
trading off attenuation of leakage from frequency bands near the frequency of
interest (narrow-band leakage) against leakage from faraway frequency bands
(broad-band leakage), they are all superior in both respects to the boxcar
window used in the naive periodogram.

Another approach which deals with both the inefficiency problem and with the
Code example #19
f0 = fs / 4
p0 = 0  # radians
a0 = 2  # Volts

df = np.random.uniform(-2, 2, k)
fn = f0 + df * fs / N

Ts = 1 / fs
tt = np.linspace(0, (N - 1) * Ts, N)

x = np.transpose(np.vstack([a0 * np.sin(2 * np.pi * ff * tt) for ff in fn]))

ventana = [
    sig.boxcar(N),
    sig.bartlett(N),
    sig.hann(N),
    sig.blackman(N),
    sig.flattop(N)
]

V = len(ventana)
sesgo = np.zeros(V)
var = np.zeros(V)
prom = np.zeros(V)

dist = np.zeros((k, V))

for (vv, this_win) in zip(range(V), ventana):
    X = np.transpose(np.vstack([x[:, kk] * this_win for kk in range(0, k)]))
    X = fft(X, axis=0)
Code example #20
barva = ['r', 'b', 'g', 'k', 'm', 'y', 'c']

DIR = '/home/jernej/Desktop/ModelskaAn/MOJEDELLO/dvanajsta/'
val = ["val2", "val3"]
V = 1
SIG = loadtxt(DIR + val[V] + ".dat")  # read the data file
LS = len(SIG)  # length of the signal

SIG_256 = SIG[0:LS // 2]
SIG_128 = SIG[0:LS // 4]
SIG_64 = SIG[0:LS // 8]

STD = 7
windowGauss = signal.gaussian(LS, std=STD)
windowBartt = signal.bartlett(LS)
windowHann = signal.hann(LS)
windowCH = signal.chebwin(LS, at=100)
windowTuR = signal.tukey(LS)

FTSIG = 2 * abs(np.fft.fft(SIG))**2
FTfreq = np.fft.fftfreq(LS, 1 / len(SIG))
FTSIG_G = 2 * abs(np.fft.fft(SIG * signal.gaussian(LS, std=STD)))**2
FTSIG_B = 2 * abs(np.fft.fft(SIG * signal.bartlett(LS)))**2
FTSIG_H = 2 * abs(np.fft.fft(SIG * signal.hann(LS)))**2
FTSIG_CH = 2 * abs(np.fft.fft(SIG * signal.chebwin(LS, at=100)))**2
FTSIG_T = 2 * abs(np.fft.fft(SIG * signal.tukey(LS)))**2
FTSIG_HCH = 2 * abs(np.fft.fft(SIG * signal.tukey(LS) * signal.hann(LS)))**2

FTSIG_256 = 2 * abs(np.fft.fft(SIG_256))**2
FTfreq_256 = np.fft.fftfreq(LS // 2, 1 / len(SIG_256))
Code example #21
def remix_solo(x):
    """ The core method to analyse, separate, estimate, remix and reconstruct audio mixtures and sources.
        Args:
            x             : (2D ndarray) The two-channel mixture time domain waveform
        Returns:
            x             : (2D ndarray) The two-channel mixture time domain waveform
            yhat          : (array)      Single channel solo instrument time domain waveform
            yhatb         : (2D ndarray) The two-channel accompanying instruments time domain waveform
            ymix          : (2D ndarray) The two-channel remixed time domain waveform
    """
    # Load models using pickle
    print('Loading models')

    # Check the OS to avoid some Windows crashes
    plat = sys.platform
    if plat == 'linux' or plat == 'linux2' or plat == 'darwin':
        ww = pickle.load(open('solo_suppression_mag.p', 'rb'),
                         encoding='latin1')
        # ww = pickle.load(open('solo_suppression_mag.p', 'rb'))
        wwpan = pickle.load(open('pannet_mag.p', 'rb'), encoding='latin1')
        # wwpan = pickle.load(open('pannet_mag.p', 'rb'))
    else:
        fileA = open('solo_suppression_mag.p', 'rb')
        ww = pickle.load(fileA, encoding='latin1')
        fileB = open('pannet_mag.p', 'rb')
        wwpan = pickle.load(fileB, encoding='latin1')
        del fileA, fileB

    hop = 512
    N = 4096
    wsz = 2049
    # Left/Right/Mid Analysis
    xL = x[:, 0]
    xR = x[:, 1]
    MmX, MpX = TF.STFT((xL + xR) * 0.5, sig.bartlett(wsz, True), N, hop)
    LmX, LpX = TF.STFT(xL, sig.bartlett(wsz, True), N, hop)
    RmX, RpX = TF.STFT(xR, sig.bartlett(wsz, True), N, hop)

    print('Extracting Solo Information')
    ### Hidden Layer Representation 1
    Trs = sigmoid(np.dot(MmX, ww[2]) + ww[3])
    act = relu(np.dot(MmX, ww[0]) + ww[1])
    act *= Trs
    hl = act + (1. - Trs) * MmX

    ### Hidden Layer Representation 2
    Trs = sigmoid(np.dot(hl, ww[6]) + ww[7])
    act = relu(np.dot(hl, ww[4]) + ww[5])
    act *= Trs
    hl = act + (1. - Trs) * hl

    ### Hidden Layer Representation 3
    Trs = sigmoid(np.dot(hl, ww[10]) + ww[11])
    act = relu(np.dot(hl, ww[8]) + ww[9])
    act *= Trs
    hl = act + (1. - Trs) * hl

    ### Hidden Layer Representation 4
    Trs = sigmoid(np.dot(hl, ww[14]) + ww[15])
    act = relu(np.dot(hl, ww[12]) + ww[13])
    act *= Trs
    hl = act + (1. - Trs) * hl

    ### Output Layer
    Trs = sigmoid(np.dot(hl, ww[18]) + ww[19])
    act = relu(np.dot(hl, ww[16]) + ww[17])
    act *= Trs
    hl = ((act + (1. - Trs) * hl) + eps)

    # Monophonic Solo
    yhat = TF.iSTFT(hl, MpX, wsz, hop)

    # Stereo instrumentation
    print('Estimating accompaniment instrumentation')
    mask = fm(LmX,
              hl, [(LmX - hl).clip(0.)], [], [],
              alpha=1.3,
              method='alphaWiener')
    mshatL = mask(reverse=True)

    mask = fm(RmX,
              hl, [(RmX - hl).clip(0.)], [], [],
              alpha=1.3,
              method='alphaWiener')
    mshatR = mask(reverse=True)

    # Time-domain reconstruction
    yhatbL = TF.iSTFT(mshatL, LpX, wsz, hop)
    yhatbR = TF.iSTFT(mshatR, RpX, wsz, hop)

    yhatb = np.vstack((yhatbL, yhatbR)).T

    # Mixing coefficients Estimation
    print('Estimating Mixing Coefficients')
    ### Hidden Layer Representation 1
    Trs = sigmoid(np.dot(hl, wwpan[2]) + wwpan[3])
    act = relu(np.dot(hl, wwpan[0]) + wwpan[1])
    act *= Trs
    hl = act + (1. - Trs) * hl

    ### Hidden Layer Representation 2
    Trs = sigmoid(np.dot(hl, wwpan[6]) + wwpan[7])
    act = relu(np.dot(hl, wwpan[4]) + wwpan[5])
    act *= Trs
    hl = act + (1. - Trs) * hl

    mix_vec = softmax(np.dot(hl, wwpan[8]) + wwpan[9])
    mix_vec = np.sum(mix_vec, axis=0)

    # Acquiring locations
    degloc = np.argmax(mix_vec[19:])
    gloc = np.argmax(mix_vec[:19])
    mix_vec = np.zeros((40, 1), dtype=np.float32)

    mix_vec[degloc + 19] = 1.
    mix_vec[gloc] = 1.

    print('Performing Mixing')
    degrees, gain = vec2val(mix_vec)
    LGenv, RGenv = pan_gain_env(yhat, degrees, gain)

    ymix = np.vstack((yhat * LGenv, yhat * RGenv)).T + yhatb

    return x, yhat[:x.shape[0]], yhatb[:x.shape[0], :], ymix[:x.shape[0], :]
Code example #22
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 22 00:54:03 2019

@author: fede
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from numpy.fft import fft, fftshift
import scipy.signal as sig

N = 60  # samples
fftSize = 2048

ventanas = [sig.boxcar(N), sig.bartlett(N), sig.hann(N), sig.blackman(N), sig.flattop(N)]
ventanas_names = ["rectangular", "bartlett", "hanning", "blackman", "flattop"]
V = len(ventanas_names)
plt.figure("Ventanas", figsize = (10,10))

for (vv, this_win) in zip(ventanas_names, ventanas):
    plt.plot(this_win, label=vv)
plt.legend()
plt.grid()
plt.xlabel("Sample number")
plt.ylabel("Window amplitude")
plt.title("Window shapes")


complexMat = np.transpose(np.vstack([fft(thisWin,fftSize, axis=0) for thisWin in ventanas]))
# fft(signal, size): if size > len(signal), the FFT zero-pads automatically :)
Code example #23
File: BaseBandFilter.py Project: blu-ray/AM-Radio
 def bartlett(self, delta_w):
     # Pick an odd window length m from the requested transition width delta_w,
     # then build a Bartlett window of that length.
     m = int(np.ceil((8 * np.pi) / (2 * delta_w))) + 1
     if m % 2 == 0:
         m = m + 1
     w = signal.bartlett(m)
     return w
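A minimal sketch (not from the AM-Radio project) showing how a window of the length chosen above could be used for windowed-sinc low-pass FIR design; delta_w and wc are illustrative values in rad/sample:

import numpy as np
from scipy import signal

delta_w = 0.05 * np.pi                          # desired transition width
wc = 0.3 * np.pi                                # cutoff frequency

m = int(np.ceil((8 * np.pi) / (2 * delta_w))) + 1
if m % 2 == 0:
    m += 1

n = np.arange(m) - (m - 1) / 2
ideal = wc / np.pi * np.sinc(wc / np.pi * n)    # ideal low-pass impulse response
h = ideal * signal.bartlett(m)                  # taper it with the Bartlett window

w, H = signal.freqz(h, worN=2048)
# np.abs(H) approximates a low-pass response with cutoff near wc.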
Code example #24
center = int(np.floor(N / 2))
modX = modX[center:N]
modX = 20 * np.log10(modX)
freq = np.linspace(0, 0.5, len(modX))
plt.plot(freq, modX)
plt.title("Rectangular", fontsize=20)
plt.xlim(0.2, 0.3)
plt.xlabel("Frecuencia normalizada", fontsize=20)
plt.ylabel("Amplitud en dB", fontsize=20)
plt.grid()

#########################
###  Bartlett window  ###
#########################
plt.figure("Bartlett")
bartlettWindow = sig.bartlett(N)
a2dB = -40
a2 = 10**(a2dB / 20)
x1 = np.sin(2 * np.pi * f1 * tt)
x2 = a2 * np.sin(2 * np.pi * f2 * tt)
x = x1 + x2
x = x * bartlettWindow

X = fft(x)
modX = np.abs(fftshift(X)) * 2 / N
center = int(np.floor(N / 2))
modX = modX[center:N]
modX = 20 * np.log10(modX)
freq = np.linspace(0, 0.5, len(modX))
plt.plot(freq, modX)
plt.title("Bartlett", fontsize=20)
Code example #25
    def get_data(self):
        """Get spectrogram data from the tuner.  Will return width number of
		values which are the intensities of each frequency bucket (i.e. FFT of
		radio samples).
		"""
        # Get width number of raw samples so the number of frequency bins is
        # the same as the display width.  Add two because there will be mean/DC
        # values in the results which are ignored. Increase by 1/self.zoom_fac if needed

        if self.zoom_fac < (self.sdr.sample_rate / 1000000):
            zoom = int(self.width *
                       ((self.sdr.sample_rate / 1000000) / self.zoom_fac))
        else:
            zoom = self.width
            self.zoom_fac = self.get_sample_rate()

        if zoom < freqshow.SDR_SAMPLE_SIZE:
            freqbins = self.sdr.read_samples(freqshow.SDR_SAMPLE_SIZE)[0:zoom +
                                                                       2]
        else:
            zoom = self.width
            self.zoom_fac = self.get_sample_rate()
            freqbins = self.sdr.read_samples(freqshow.SDR_SAMPLE_SIZE)[0:zoom +
                                                                       2]

        # Apply a window function to the sample to remove power in sample sidebands before the fft.

        if self.filter == 'kaiser':
            window = signal.kaiser(
                freqshow.SDR_SAMPLE_SIZE,
                self.kaiser_beta,
                False,
            )[0:zoom +
              2]  # for every bin there is a window the same exact size as the read samples.
        elif self.filter == 'boxcar':
            window = signal.boxcar(
                freqshow.SDR_SAMPLE_SIZE,
                False,
            )[0:zoom + 2]
        elif self.filter == 'hann':
            window = signal.hann(
                freqshow.SDR_SAMPLE_SIZE,
                False,
            )[0:zoom + 2]
        elif self.filter == 'hamming':
            window = signal.hamming(
                freqshow.SDR_SAMPLE_SIZE,
                False,
            )[0:zoom + 2]
        elif self.filter == 'blackman':
            window = signal.blackman(
                freqshow.SDR_SAMPLE_SIZE,
                False,
            )[0:zoom + 2]
        elif self.filter == 'blackmanharris':
            window = signal.blackmanharris(
                freqshow.SDR_SAMPLE_SIZE,
                False,
            )[0:zoom + 2]
        elif self.filter == 'bartlett':
            window = signal.bartlett(
                freqshow.SDR_SAMPLE_SIZE,
                False,
            )[0:zoom + 2]
        elif self.filter == 'barthann':
            window = signal.barthann(
                freqshow.SDR_SAMPLE_SIZE,
                False,
            )[0:zoom + 2]
        elif self.filter == 'nuttall':
            window = signal.nuttall(
                freqshow.SDR_SAMPLE_SIZE,
                False,
            )[0:zoom + 2]
        else:
            window = 1

        samples = freqbins * window

        # Run an FFT and take the absolute value to get frequency magnitudes.
        freqs = np.absolute(fft(samples))

        # Ignore the mean/DC values at the ends.
        freqs = freqs[1:-1]

        # Reverse the order of the freqs array if swapping I and Q
        if self.swap_iq == True:
            freqs = freqs[::-1]

        # Shift FFT result positions to put center frequency in center.
        freqs = np.fft.fftshift(freqs)

        # Truncate the freqs array to the width of the screen if necessary.
        if freqs.size > self.width:

            freq_step = self.get_freq_step(
            )  # Get the frequency step in Hz between pixels.
            shiftsweep = int(self.get_lo_offset() * 1000000 /
                             freq_step)  # LO offset in pixels.
            extra_samples = int(
                (freqs.size - self.width) / 2
            )  # The excess samples either side of the display width in pixels.

            if extra_samples > abs(
                    shiftsweep
            ):  # check if there is room to shift the array by the LO offset.

                if self.get_swap_iq() == True:
                    lextra = extra_samples + shiftsweep
                elif self.get_swap_iq() == False:
                    lextra = extra_samples - shiftsweep
            else:
                lextra = extra_samples

            rextra = freqs.size - (lextra + self.width)
            freqs = freqs[lextra:-rextra]

        # Convert to decibels.
        freqs = 20.0 * np.log10(freqs)

        # Get signal strength of the center frequency.

        #		for i in range ( 1, 11):
        #			self.sig_strength = (self.get_sig_strength() + freqs[((zoom+2)/2)+i-5])
        #		self.sig_strength = self.get_sig_strength()/10

        # Update model's min and max intensities when auto scaling each value.
        if self.min_auto_scale:
            min_intensity = np.min(freqs)
            self.min_intensity = min_intensity if self.min_intensity is None \
             else min(min_intensity, self.min_intensity)
        if self.max_auto_scale:
            max_intensity = np.max(freqs)
            self.max_intensity = max_intensity if self.max_intensity is None \
             else max(max_intensity, self.max_intensity)
        # Update intensity range (length between min and max intensity).
        self.range = self.max_intensity - self.min_intensity

        # Return frequency intensities.
        return freqs
Code example #26
File: sonorities.py Project: Js-Mim/aes_wimp
def remix_solo(x):
    """ The core method to analyse, separate, estimate, remix and reconstruct audio mixtures and sources.
        Args:
            x             : (2D ndarray) The two-channel mixture time domain waveform
        Returns:
            x             : (2D ndarray) The two-channel mixture time domain waveform
            yhat          : (array)      Single channel solo instrument time domain waveform
            yhatb         : (2D ndarray) The two-channel accompanying instruments time domain waveform
            ymix          : (2D ndarray) The two-channel remixed time domain waveform
    """
    # Load models using pickle
    print('Loading models')

    # Check the OS to avoid some Windows crashes
    plat = sys.platform
    if plat  == 'linux' or plat == 'linux2' or plat == 'darwin' :
        ww = pickle.load(open('solo_suppression_mag.p', 'rb'))
        wwpan = pickle.load(open('pannet_mag.p', 'rb'))
    else :
        fileA = open('solo_suppression_mag.p', 'rb')
        ww = pickle.load(fileA,encoding='latin1')
        fileB = open('pannet_mag.p', 'rb')
        wwpan = pickle.load(fileB,encoding='latin1')
        del fileA, fileB

    hop = 512
    N = 4096
    wsz = 2049
    # Left/Right/Mid Analysis
    xL = x[:, 0]
    xR = x[:, 1]
    MmX, MpX = TF.STFT((xL+xR) * 0.5, sig.bartlett(wsz, True), N, hop)
    LmX, LpX = TF.STFT(xL, sig.bartlett(wsz, True), N, hop)
    RmX, RpX = TF.STFT(xR, sig.bartlett(wsz, True), N, hop)

    print('Extracting Solo Information')
    ### Hidden Layer Representation 1
    Trs = sigmoid(np.dot(MmX, ww[2]) + ww[3])
    act = relu(np.dot(MmX, ww[0]) + ww[1])
    act *= Trs
    hl = act + (1. - Trs) * MmX

    ### Hidden Layer Representation 2
    Trs = sigmoid(np.dot(hl, ww[6]) + ww[7])
    act = relu(np.dot(hl, ww[4]) + ww[5])
    act *= Trs
    hl = act + (1. - Trs) * hl

    ### Hidden Layer Representation 3
    Trs = sigmoid(np.dot(hl, ww[10]) + ww[11])
    act = relu(np.dot(hl, ww[8]) + ww[9])
    act *= Trs
    hl = act + (1. - Trs) * hl

    ### Hidden Layer Representation 4
    Trs = sigmoid(np.dot(hl, ww[14]) + ww[15])
    act = relu(np.dot(hl, ww[12]) + ww[13])
    act *= Trs
    hl = act + (1. - Trs) * hl

    ### Output Layer
    Trs = sigmoid(np.dot(hl, ww[18]) + ww[19])
    act = relu(np.dot(hl, ww[16]) + ww[17])
    act *= Trs
    hl = ((act + (1. - Trs) * hl) + eps)

    # Monophonic Solo
    yhat = TF.iSTFT(hl, MpX, wsz, hop)

    # Stereo instrumentation
    print('Estimating accompaniment instrumentation')
    mask = fm(LmX, hl, [(LmX-hl).clip(0.)], [], [], alpha = 1.3, method = 'alphaWiener')
    mshatL = mask(reverse = True)

    mask = fm(RmX, hl, [(RmX-hl).clip(0.)], [], [], alpha = 1.3, method = 'alphaWiener')
    mshatR = mask(reverse = True)

    # Time-domain reconstruction
    yhatbL = TF.iSTFT(mshatL, LpX, wsz, hop)
    yhatbR = TF.iSTFT(mshatR, RpX, wsz, hop)

    yhatb = np.vstack((yhatbL, yhatbR)).T

    # Mixing coefficients Estimation
    print('Estimating Mixing Coefficients')
    ### Hidden Layer Representation 1
    Trs = sigmoid(np.dot(hl, wwpan[2]) + wwpan[3])
    act = relu(np.dot(hl, wwpan[0]) + wwpan[1])
    act *= Trs
    hl = act + (1. - Trs) * hl

    ### Hidden Layer Representation 2
    Trs = sigmoid(np.dot(hl, wwpan[6]) + wwpan[7])
    act = relu(np.dot(hl, wwpan[4]) + wwpan[5])
    act *= Trs
    hl = act + (1. - Trs) * hl

    mix_vec = softmax(np.dot(hl, wwpan[8]) + wwpan[9])
    mix_vec = np.sum(mix_vec, axis=0)

    # Acquiring locations
    degloc = np.argmax(mix_vec[19:])
    gloc = np.argmax(mix_vec[:19])
    mix_vec = np.zeros((40,1), dtype = np.float32)

    mix_vec[degloc + 19] = 1.
    mix_vec[gloc] = 1.

    print('Performing Mixing')
    degrees, gain = vec2val(mix_vec)
    LGenv, RGenv = pan_gain_env(yhat, degrees, gain)

    ymix = np.vstack((yhat * LGenv, yhat * RGenv)).T + yhatb

    return x, yhat[:x.shape[0]], yhatb[:x.shape[0], :], ymix[:x.shape[0], :]
Code example #27
example demonstrates the spectral leakage for several different windows
(including the boxcar):
"""

fig02 = plt.figure()

# Boxcar with zeroed out fraction
b = sig.boxcar(npts)
zfrac = 0.15
zi = int(npts*zfrac)
b[:zi] = b[-zi:] = 0
name = 'Boxcar - zero fraction=%.2f' % zfrac
winspect(b, fig02, name)

winspect(sig.hanning(npts), fig02, 'Hanning')
winspect(sig.bartlett(npts), fig02, 'Bartlett')
winspect(sig.barthann(npts), fig02, 'Modified Bartlett-Hann')

""" 

.. image:: fig/multi_taper_spectral_estimation_02.png

As before, the figure on the left displays the windowing function in the
temporal domain and the figure on the right displays the attenuation of
spectral leakage in the other frequency bands of the spectrum. Notice that
although the windowing functions have different spectral attenuation profiles,
trading off attenuation of leakage from frequency bands near the frequency of
interest (narrow-band leakage) against leakage from faraway frequency bands
(broad-band leakage), they are all superior in both respects to the boxcar
window used in the naive periodogram.
Code example #28
"""
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal as sig
from scipy.fftpack import fft, fftshift

N = 1000
fs = 1000
Ts = 1 / fs

tt = np.linspace(0, (N - 1) * Ts, N)

ventanas = [
    1,
    sig.boxcar(N),
    sig.bartlett(N),
    sig.hann(N),
    sig.blackman(N),
    sig.flattop(N)
]

##Rect
d = 2.8
f1 = fs / 4 + 0.5 * fs / N
f2 = f1 + d * (fs / N)
a2 = 1
x1 = np.sin(2 * np.pi * f1 * tt)
x2 = a2 * np.sin(2 * np.pi * f2 * tt)
x = x1 + x2
xw = x * sig.boxcar(N)
X = fft(xw)