Example #1
    def test_cross_coupling_xtalk(self):
        # introduce a cross reflection at a single delay
        outvis = sigchain.gen_cross_coupling_xtalk(self.freqs, self.Tsky, amp=1e-2, dly=300, phs=1)
        ovfft = np.fft.fft(
            outvis * windows.blackmanharris(len(self.freqs))[None, :], axis=1
        )

        # take covariance across time and assert delay 300 is highly covariant
        # compared to neighbors
        cov = np.cov(ovfft.T)
        mcov = np.mean(np.abs(cov), axis=0)
        select = np.argsort(np.abs(self.dlys - 300))[:10]
        nt.assert_almost_equal(self.dlys[select][np.argmax(mcov[select])], 300.0)
        # inspect for yourself: plt.matshow(np.log10(np.abs(cov)))

        # conjugate it and assert it shows up at -300
        outvis = sigchain.gen_cross_coupling_xtalk(self.freqs, self.Tsky, amp=1e-2, dly=300, phs=1, conj=True)
        ovfft = np.fft.fft(
            outvis * windows.blackmanharris(len(self.freqs))[None, :], axis=1
        )
        cov = np.cov(ovfft.T)
        mcov = np.mean(np.abs(cov), axis=0)
        select = np.argsort(np.abs(self.dlys - -300))[:10]
        nt.assert_almost_equal(self.dlys[select][np.argmax(mcov[select])], -300.0)

        # assert its phase stable across time
        select = np.argmin(np.abs(self.dlys - -300))
        nt.assert_true(
            np.isclose(np.angle(ovfft[:, select]), -1, atol=1e-4, rtol=1e-4).all()
        )
Example #2
 def test_basic(self):
     assert_allclose(windows.blackmanharris(6, False),
                     [6.0e-05, 0.055645, 0.520575, 1.0, 0.520575, 0.055645])
     assert_allclose(windows.blackmanharris(7, sym=False),
                     [6.0e-05, 0.03339172347815117, 0.332833504298565,
                      0.8893697722232837, 0.8893697722232838,
                      0.3328335042985652, 0.03339172347815122])
     assert_allclose(windows.blackmanharris(6),
                     [6.0e-05, 0.1030114893456638, 0.7938335106543362,
                      0.7938335106543364, 0.1030114893456638, 6.0e-05])
     assert_allclose(windows.blackmanharris(7, sym=True),
                     [6.0e-05, 0.055645, 0.520575, 1.0, 0.520575, 0.055645,
                      6.0e-05])
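
For reference, the values asserted above follow directly from the minimum 4-term Blackman-Harris cosine sum; a small sketch reproducing scipy's symmetric and periodic variants (the periodic window is simply the symmetric window of M+1 points with the last sample dropped):

import numpy as np
from scipy.signal import windows

def bh4(M, sym=True):
    # minimum 4-term Blackman-Harris coefficients
    a0, a1, a2, a3 = 0.35875, 0.48829, 0.14128, 0.01168
    L = M if sym else M + 1          # periodic: compute M+1 symmetric points, drop the last
    n = np.arange(L)
    w = (a0 - a1 * np.cos(2 * np.pi * n / (L - 1))
            + a2 * np.cos(4 * np.pi * n / (L - 1))
            - a3 * np.cos(6 * np.pi * n / (L - 1)))
    return w if sym else w[:-1]

assert np.allclose(bh4(7, sym=True), windows.blackmanharris(7, sym=True))
assert np.allclose(bh4(6, sym=False), windows.blackmanharris(6, sym=False))
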
Example #4
def spatial_to_wn(eta_grid, kmax):
    """Synthesize spatial grid to estimate wavenumber spectrum"""
    win_len = eta_grid.shape[0]
    # synthesize wave number field
    win_2D = blackmanharris(win_len)[:,
                                     None] * blackmanharris(win_len)[None, :]
    s2 = np.sum(win_2D**2)

    # transform and scale result
    syn_kxky = np.abs(np.fft.rfft2(win_2D * eta_grid, axes=(1, 0)))**2
    syn_kxky *= 2 / (kmax * s2)

    # the fft shift makes thinking about this easier
    syn_kxky = np.fft.fftshift(syn_kxky, axes=1)
    return syn_kxky
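
Hypothetical usage of spatial_to_wn above, assuming its module already imports numpy and scipy.signal's blackmanharris; kmax here is only an illustrative scale factor:

import numpy as np

rng = np.random.default_rng(0)
eta = rng.standard_normal((128, 128))   # stand-in for a surface-elevation grid
spec = spatial_to_wn(eta, kmax=np.pi)
print(spec.shape)                       # (65, 128): rfft2 halves the axis the real transform runs over
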
Example #5
    def test_reflection_gains(self):
        # introduce a cable reflection into the autocorrelation
        gains = sigchain.gen_reflection_gains(self.freqs, [0], amp=[1e-1], dly=[300], phs=[1])
        outvis = sigchain.apply_gains(self.vis, gains, [0, 0])
        ovfft = np.fft.fft(
            outvis * windows.blackmanharris(len(self.freqs))[None, :], axis=1
        )

        # assert reflection is at +300 ns and check its amplitude
        select = self.dlys > 200
        nt.assert_almost_equal(
            self.dlys[select][np.argmax(np.mean(np.abs(ovfft), axis=0)[select])], 300
        )
        select = np.argmin(np.abs(self.dlys - 300))
        m = np.mean(np.abs(ovfft), axis=0)
        nt.assert_true(np.isclose(m[select] / m[0], 1e-1, atol=1e-2))

        # assert also reflection at -300 ns
        select = self.dlys < -200
        nt.assert_almost_equal(
            self.dlys[select][np.argmax(np.mean(np.abs(ovfft), axis=0)[select])], -300
        )
        select = np.argmin(np.abs(self.dlys - -300))
        m = np.mean(np.abs(ovfft), axis=0)
        nt.assert_true(np.isclose(m[select] / m[0], 1e-1, atol=1e-2))
Example #6
    def freq_from_fft(self, p, threshold=5000):
        """
        Estimate frequency from peak of FFT
        """
        # Debugging.
        # start_time = time.time()
        bits_per_sample = p.get_sample_size(self.FORMAT) * 8
        dtype = 'int{0}'.format(bits_per_sample)
        sig = np.frombuffer(b''.join(self.frames), dtype)
        fs = self.RATE

        # Compute Fourier transform of windowed signal
        windowed = sig * blackmanharris(len(sig))
        f = rfft(windowed)

        # Find the peak and interpolate to get a more accurate peak
        i = np.argmax(abs(f))  # Just use this for less-accurate, naive version
        true_i = self.parabolic(np.log(abs(f)), i)[0]

        # Convert to equivalent frequency
        if (fs * true_i / len(windowed)) > threshold:
            if self.scratching == False:
                print("Sent message!")
                arrow_down_event = pg.event.Event(pg.KEYDOWN, key=pg.K_RIGHT)
                pg.event.post(arrow_down_event)
                self.scratching = True
            self.scratch_started_time = time.time()
            # print(self.scratch_started_time) # Debugging
            print('Scratch detected!')
        print(fs * true_i / len(windowed))
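
The example above leans on a parabolic helper that is not shown; a common sketch of what such a helper does is quadratic interpolation through the peak bin and its two neighbours (an assumption about self.parabolic, not its actual code):

def parabolic(y, i):
    # fit a parabola through (i-1, y[i-1]), (i, y[i]), (i+1, y[i+1])
    # and return the interpolated (x, y) position of its vertex
    xv = 0.5 * (y[i - 1] - y[i + 1]) / (y[i - 1] - 2 * y[i] + y[i + 1]) + i
    yv = y[i] - 0.25 * (y[i - 1] - y[i + 1]) * (xv - i)
    return xv, yv
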
Example #7
    def nuta(self, samplingFrequency, signalData):

        windowed = signalData * blackmanharris(len(signalData))
        f = rfft(windowed)
        i = argmax(abs(f))
        a = samplingFrequency * i / len(windowed)
        print(a)
        note = ("Nie mozna rozpoznać.")
        if (a >= 390 and a < 403.5) or (a >= 762 and a < 790):
            note = "Nuta - G"
        elif a >= 403.5 and a < 427.5:
            note = ("Nuta - G#")
        elif a >= 427.5 and a < 451.5:
            note = ("Nuta - A")
        elif a >= 451.5 and a < 479.5:
            note = ("Nuta - A#")
        elif a >= 479.5 and a < 508:
            note = ("Nuta - B")
        elif a >= 508 and a < 538.5:
            note = ("Nuta - C")
        elif a >= 538.5 and a < 570.5:
            note = ("Nuta - C#")
        elif a >= 570.5 and a < 604.5:
            note = ("Nuta - D")
        elif a >= 604.5 and a < 640.5:
            note = ("Nuta - D#")
        elif a >= 640.5 and a < 678.5:
            note = ("Nuta - E")
        elif a >= 678.5 and a < 719:
            note = ("Nuta - F")
        elif a >= 719 and a < 762:
            note = ("Nuta - F#")

        tk.messagebox.showinfo("NUTA", note)
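
An alternative to the hard-coded frequency brackets above is to map the detected frequency to the nearest equal-tempered note by its semitone distance from A4 = 440 Hz; a small self-contained sketch:

import numpy as np

NOTE_NAMES = ['A', 'A#', 'B', 'C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#']

def nearest_note(freq_hz, a4=440.0):
    if freq_hz <= 0:
        return None
    semitones = int(round(12 * np.log2(freq_hz / a4)))
    return NOTE_NAMES[semitones % 12]

print(nearest_note(442.0))   # 'A'
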
Example #8
    def __init__(self,
                 sample_file: str = DEFAULT_SOURCE,
                 output_dir='./output/'):
        # read audio file
        data, sample_rate = load_audio(sample_file)
        assert sample_rate in (
            8000,
            16000,
        ), 'unsupported sample rate, valid: 8000, 16000)'
        self._sample_rate: int = sample_rate

        # force to single channel, as the ITU PESQ implementation doesn't support multiple channels
        self._sample_audio: np.ndarray = force_single_channel(data)

        # generate signal wave form
        _chirp_size = self._sample_rate
        self._chirp = chirp(self._sample_rate,
                            _chirp_size) * blackmanharris(_chirp_size)
        self._norm_chirp = normalize(self._chirp)

        self._guard = np.zeros(self._sample_rate * 1)
        self._sample_audio_with_guard = np.concatenate(
            (self._guard, self._chirp, self._guard, self._sample_audio))

        # others
        self._output_dir = output_dir
        self._file_no = 0
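
The chirp(sample_rate, size) call above is a project-local helper; a sketch of what such a probe generator might look like, assuming a linear sweep from 100 Hz up to Nyquist over the chirp length (the real helper may differ):

import numpy as np
from scipy.signal import chirp as scipy_chirp, windows

def make_probe_chirp(sample_rate: int, size: int) -> np.ndarray:
    t = np.arange(size) / sample_rate
    sweep = scipy_chirp(t, f0=100.0, t1=t[-1], f1=sample_rate / 2, method='linear')
    return sweep * windows.blackmanharris(size)

probe = make_probe_chirp(16000, 16000)   # one second at 16 kHz, tapered at both ends
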
Example #9
    def t_wykres(self):
        samplingFrequency, signalData = wavfile.read(self.filename)

        if len(signalData.shape) == 2:
            signalData = signalData[:, 0]
        fs = samplingFrequency
        fragment = signalData[:2048]
        fragment = fragment / np.max(np.abs(fragment))
        widmo = 20 * np.log10(
            np.abs(np.fft.rfft(fragment * np.hamming(2048))) / 1024)
        f = np.fft.rfftfreq(2048, 1 / fs)
        f = f / 1000
        self.wamplituda.clear()
        self.wamplituda.grid(True)
        self.wamplituda.set_xlim(xmin=0)
        self.wamplituda.plot(f, widmo)
        self.wamplituda.set_xlim(xmin=0, xmax=4)
        self.wamplituda.set_ylim(ymin=-90, ymax=0)

        self.waveform_canvas.draw()
        windowed = signalData * blackmanharris(len(signalData))
        f = rfft(windowed)
        i = argmax(abs(f))
        a = samplingFrequency * i / len(windowed)
        print(a)
        self.nuta(samplingFrequency, signalData)
Example #10
def stochasticResidualAnal(x, N, H, sfreq, smag, sphase, fs, stocf):
    """
	Subtract sinusoids from a sound and approximate the residual with an envelope
	x: input sound, N: fft size, H: hop-size
	sfreq, smag, sphase: sinusoidal frequencies, magnitudes and phases
	fs: sampling rate; stocf: stochastic factor, used in the approximation
	returns stocEnv: stochastic approximation of residual 
	"""

    hN = N // 2  # half of fft size (integer, used for zero-padding and slicing)
    x = np.append(
        np.zeros(hN),
        x)  # add zeros at beginning to center first window at sample 0
    x = np.append(x,
                  np.zeros(hN))  # add zeros at the end to analyze last sample
    bh = blackmanharris(N)  # synthesis window
    w = bh / sum(bh)  # normalize synthesis window
    L = sfreq.shape[0]  # number of frames, this works if no sines
    pin = 0
    for l in range(L):
        xw = x[pin:pin + N] * w  # window the input sound
        X = fft(fftshift(xw))  # compute FFT
        Yh = UF_C.genSpecSines(N * sfreq[l, :] / fs, smag[l, :], sphase[l, :],
                               N)  # generate spec sines
        Xr = X - Yh  # subtract sines from original spectrum
        mXr = 20 * np.log10(abs(Xr[:hN]))  # magnitude spectrum of residual
        mXrenv = resample(np.maximum(-200, mXr),
                          int(mXr.size * stocf))  # decimate the mag spectrum (size must be an int)
        if l == 0:  # if first frame
            stocEnv = np.array([mXrenv])
        else:  # rest of frames
            stocEnv = np.vstack((stocEnv, np.array([mXrenv])))
        pin += H  # advance sound pointer
    return stocEnv
Example #11
def harmonicModel(x, fs, w, N, t, nH, minf0, maxf0, f0et):
	"""
	Analysis/synthesis of a sound using the sinusoidal harmonic model
	x: input sound, fs: sampling rate, w: analysis window, 
	N: FFT size (minimum 512), t: threshold in negative dB, 
	nH: maximum number of harmonics, minf0: minimum f0 frequency in Hz, 
	maxf0: maximim f0 frequency in Hz, 
	f0et: error threshold in the f0 detection (ex: 5),
	returns y: output array sound
	"""

	hN = N/2                                                # size of positive spectrum
	hM1 = int(math.floor((w.size+1)/2))                     # half analysis window size by rounding
	hM2 = int(math.floor(w.size/2))                         # half analysis window size by floor
	x = np.append(np.zeros(hM2),x)                          # add zeros at beginning to center first window at sample 0
	x = np.append(x,np.zeros(hM1))                          # add zeros at the end to analyze last sample
	Ns = 512                                                # FFT size for synthesis (even)
	H = Ns//4                                               # Hop size used for analysis and synthesis (integer)
	hNs = Ns//2
	pin = max(hNs, hM1)                                     # init sound pointer in middle of anal window          
	pend = x.size - max(hNs, hM1)                           # last sample to start a frame
	fftbuffer = np.zeros(N)                                 # initialize buffer for FFT
	yh = np.zeros(Ns)                                       # initialize output sound frame
	y = np.zeros(x.size)                                    # initialize output array
	w = w / sum(w)                                          # normalize analysis window
	sw = np.zeros(Ns)                                       # initialize synthesis window
	ow = triang(2*H)                                        # overlapping window
	sw[int(hNs-H):int(hNs+H)] = ow
	bh = blackmanharris(Ns)                                 # synthesis window
	bh = bh / sum(bh)                                       # normalize synthesis window
	sw[int(hNs-H):int(hNs+H)] = sw[int(hNs-H):int(hNs+H)] / bh[int(hNs-H):int(hNs+H)]     # window for overlap-add
	hfreqp = []
	f0t = 0
	f0stable = 0
	while pin<pend:             
	#-----analysis-----             
		x1 = x[pin-hM1:pin+hM2]                               # select frame
		mX, pX = DFT.dftAnal(x1, w, N)                        # compute dft
		ploc = UF.peakDetection(mX, t)                        # detect peak locations     
		iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)   # refine peak values
		ipfreq = fs * iploc/N
		f0t = UF.f0Twm(ipfreq, ipmag, f0et, minf0, maxf0, f0stable)  # find f0
		if ((f0stable==0)&(f0t>0)) \
				or ((f0stable>0)&(np.abs(f0stable-f0t)<f0stable/5.0)):
			f0stable = f0t                                     # consider a stable f0 if it is close to the previous one
		else:
			f0stable = 0
		hfreq, hmag, hphase = harmonicDetection(ipfreq, ipmag, ipphase, f0t, nH, hfreqp, fs) # find harmonics
		hfreqp = hfreq
	#-----synthesis-----
		Yh = UF.genSpecSines(hfreq, hmag, hphase, Ns, fs)     # generate spec sines          
		fftbuffer = np.real(ifft(Yh))                         # inverse FFT
		yh[:int(hNs-1)] = fftbuffer[int(hNs+1):]                        # undo zero-phase window
		yh[int(hNs-1):] = fftbuffer[:int(hNs+1)] 
		y[pin-hNs:pin+hNs] += sw*yh                           # overlap-add
		pin += H                                              # advance sound pointer
	y = np.delete(y, range(hM2))                            # delete half of first window which was added in stftAnal
	y = np.delete(y, range(y.size-hM1, y.size))             # delete the samples that were added at the end
	return y
Example #12
    def update(self):
        """
        Retrieve and process data from the queue to update the
        PyQt plots
        """

        try:
            # get and pre-process data
            data = self.queue.get()
            data = self.decode(data, self.channels,
                               np.int16) * self.overall_amp_factor

            # calculate waveform of data
            y_wav = np.mean(data, axis=1)
            y_wav = y_wav / self.max_freq  # shifts y_wav to [-1, 1]

            # smooths waveform values to be more easy on the eyes
            if self.prev_y_wav is not None:
                y_wav = (1 - self.wav_decay_speed
                         ) * self.prev_y_wav + self.wav_decay_speed * y_wav

            # apply blackman-harris window to data to normalize values a bit
            window = blackmanharris(self.chunk_size)
            data = window * np.mean(data, axis=1)

            # calculate fourier transform of data
            y_fft = np.abs(np.fft.rfft(data, n=self.fft_size * 2))
            y_fft = np.delete(y_fft, len(y_fft) - 1)
            y_fft = y_fft * 2 / (self.max_freq * 256)  # shifts y_fft to [0, 1]

            # gets highest bass frequency
            bass = np.max(y_fft[0:self.bass_index])

            # smooths bass values
            if self.prev_bass is not None:
                bass = (1 - self.bass_decay_speed
                        ) * self.prev_bass + self.bass_decay_speed * bass

            # smooths frequency spectrum values to be more easy on the eyes
            if self.prev_y_fft is not None:
                y_fft = (1 - self.fft_decay_speed
                         ) * self.prev_y_fft + self.fft_decay_speed * y_fft

            # draws data
            self.draw_data(
                np.sign(y_wav) * (np.abs(y_wav)**self.wav_amp_factor),
                y_fft[self.low_index:self.high_index]**self.fft_amp_factor,
                bass**self.bass_amp_factor)

            # previous value updates
            self.prev_y_wav = y_wav
            self.prev_y_fft = y_fft
            self.prev_bass = bass

            self.frame_count += 1
        except Exception:
            # data could not be retrieved or processed this cycle; skip the update
            pass
Example #13
def array_factor(number_of_elements, scan_angle, element_spacing, frequency,
                 theta, window_type, side_lobe_level):
    """
    Calculate the array factor for a uniformly spaced linear array with the selected amplitude taper.
    :param window_type: The string name of the window.
    :param side_lobe_level: The sidelobe level for Tschebyscheff window (dB).
    :param number_of_elements: The number of elements in the array.
    :param scan_angle: The angle to which the main beam is scanned (rad).
    :param element_spacing: The distance between elements.
    :param frequency: The operating frequency (Hz).
    :param theta: The angle at which to evaluate the array factor (rad).
    :return: The array factor as a function of angle.
    """
    # Calculate the wavenumber
    k = 2.0 * pi * frequency / c

    # Calculate the phase
    psi = k * element_spacing * (cos(theta) - cos(scan_angle))

    # Calculate the coefficients
    if window_type == 'Uniform':
        coefficients = ones(number_of_elements)
    elif window_type == 'Binomial':
        coefficients = binom(number_of_elements - 1,
                             range(0, number_of_elements))
    elif window_type == 'Tschebyscheff':
        warnings.simplefilter("ignore", UserWarning)
        coefficients = chebwin(number_of_elements,
                               at=side_lobe_level,
                               sym=True)
    elif window_type == 'Kaiser':
        coefficients = kaiser(number_of_elements, 6, True)
    elif window_type == 'Blackman-Harris':
        coefficients = blackmanharris(number_of_elements, True)
    elif window_type == 'Hanning':
        coefficients = hanning(number_of_elements, True)
    elif window_type == 'Hamming':
        coefficients = hamming(number_of_elements, True)

    # Calculate the offset for even/odd
    offset = int(floor(number_of_elements / 2))

    # Odd case
    if number_of_elements & 1:
        coefficients = roll(coefficients, offset + 1)
        coefficients[0] *= 0.5
        af = sum(coefficients[i] * cos(i * psi) for i in range(offset + 1))
        return af / amax(abs(af))
    # Even case
    else:
        coefficients = roll(coefficients, offset)
        af = sum(coefficients[i] * cos((i + 0.5) * psi) for i in range(offset))
        return af / amax(abs(af))
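
Hypothetical usage of array_factor above, assuming its module imports the numpy names it uses (pi, cos, ones, roll, floor, amax), scipy.constants.c, scipy.special.binom and the scipy.signal windows it references; angles are in radians and the result is normalized to its own peak:

import numpy as np
from scipy.constants import c

theta = np.linspace(0.05, np.pi - 0.05, 1801)
wavelength = c / 10e9
af = array_factor(number_of_elements=16, scan_angle=np.pi / 2,
                  element_spacing=0.5 * wavelength, frequency=10e9,
                  theta=theta, window_type='Blackman-Harris', side_lobe_level=30)
print(round(float(20 * np.log10(np.abs(af)).max()), 1))   # 0.0 dB at the main-beam peak
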
Example #14
def get_peak(data):
    """
Peak frequency from signal
    :param data: signal
    :return: peak frequency
    """
    data = data / max(data)
    data = data * blackmanharris(len(data))
    data = abs(rfft(data))

    f_peak = 44100 * argmax(data) / len(data) / 2

    return f_peak
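
A quick self-contained check of the idea in get_peak with a synthetic tone (using fs * argmax / N directly); without interpolation the estimate is only accurate to one FFT bin, about fs / N:

import numpy as np
from numpy.fft import rfft
from scipy.signal import windows

fs, n = 44100, 4096
t = np.arange(n) / fs
sig = np.sin(2 * np.pi * 1000.0 * t)
spec = np.abs(rfft(sig * windows.blackmanharris(n)))
print(fs * np.argmax(spec) / n)          # ~1001 Hz: within one bin of the true 1000 Hz
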
Example #15
def _get_peak_array(waveforms, peaks):
    """Return a binary array of contiguous peak sections."""
    # Create empty array with length of waveform
    peak_array = np.zeros(waveforms.shape[0], dtype=np.float32)
    window = blackmanharris(21)
    if peaks:
        for peak_ids in peaks:
            if peak_ids:
                for peak_id in peak_ids:
                    if len(peak_array[peak_id - 10:peak_id + 11]) >= 21:
                        peak_array[peak_id - 10:peak_id + 11] += window
        peak_array[peak_array <= 1] = 0
        peak_array /= np.max(peak_array)
    return peak_array
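
Hypothetical usage of _get_peak_array above (its module is assumed to import numpy and scipy.signal's blackmanharris). Note that isolated peaks are zeroed by the <= 1 threshold, so only overlapping peak windows survive, and the result is normalized rather than strictly binary:

import numpy as np

waveforms = np.zeros((100, 1))                        # only shape[0] is used
mask = _get_peak_array(waveforms, peaks=[[30, 35]])   # two nearby peaks whose windows overlap
print(np.count_nonzero(mask), mask.max())             # nonzero only in the overlap region, max 1.0
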
Example #16
def sineModel(x, fs, w, N, t):
    """
	Analysis/synthesis of a sound using the sinusoidal model, without sine tracking
	x: input array sound, w: analysis window, N: size of complex spectrum, t: threshold in negative dB 
	returns y: output array sound
	"""

    hM1 = int(math.floor(
        (w.size + 1) / 2))  # half analysis window size by rounding
    hM2 = int(math.floor(w.size / 2))  # half analysis window size by floor
    Ns = 512  # FFT size for synthesis (even)
    H = Ns // 4  # Hop size used for analysis and synthesis (integer)
    hNs = Ns // 2  # half of synthesis FFT size (integer)
    pin = max(hNs, hM1)  # init sound pointer in middle of anal window
    pend = x.size - max(hNs, hM1)  # last sample to start a frame
    fftbuffer = np.zeros(N)  # initialize buffer for FFT
    yw = np.zeros(Ns)  # initialize output sound frame
    y = np.zeros(x.size)  # initialize output array
    w = w / sum(w)  # normalize analysis window
    sw = np.zeros(Ns)  # initialize synthesis window
    ow = triang(2 * H)  # triangular window
    sw[hNs - H:hNs + H] = ow  # add triangular window
    bh = blackmanharris(Ns)  # blackmanharris window
    bh = bh / sum(bh)  # normalized blackmanharris window
    sw[hNs - H:hNs +
       H] = sw[hNs - H:hNs + H] / bh[hNs - H:hNs +
                                     H]  # normalized synthesis window
    while pin < pend:  # while input sound pointer is within sound
        #-----analysis-----
        x1 = x[pin - hM1:pin + hM2]  # select frame
        mX, pX = DFT.dftAnal(x1, w, N)  # compute dft
        ploc = UF.peakDetection(mX, t)  # detect locations of peaks
        pmag = mX[ploc]  # get the magnitude of the peaks
        iploc, ipmag, ipphase = UF.peakInterp(
            mX, pX, ploc)  # refine peak values by interpolation
        ipfreq = fs * iploc / float(N)  # convert peak locations to Hertz
        #-----synthesis-----
        Y = UF.genSpecSines(ipfreq, ipmag, ipphase, Ns,
                            fs)  # generate sines in the spectrum
        fftbuffer = np.real(ifft(Y))  # compute inverse FFT
        yw[:hNs - 1] = fftbuffer[hNs + 1:]  # undo zero-phase window
        yw[hNs - 1:] = fftbuffer[:hNs + 1]
        y[pin - hNs:pin +
          hNs] += sw * yw  # overlap-add and apply a synthesis window
        pin += H  # advance sound pointer
    return y
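
The sw/bh construction above is the usual overlap-add trick: dividing a triangular window by the normalized Blackman-Harris over the central 2H samples makes each frame's effective gain overlap-add to a constant at hop H = Ns/4, so frames can be summed without amplitude modulation. A small self-contained check:

import numpy as np
from scipy.signal import windows

Ns, H = 512, 128
hNs = Ns // 2
bh = windows.blackmanharris(Ns)
bh = bh / bh.sum()
sw = np.zeros(Ns)
sw[hNs - H:hNs + H] = windows.triang(2 * H) / bh[hNs - H:hNs + H]

ola = np.zeros(6 * Ns)
for pin in range(0, 6 * Ns - Ns, H):
    ola[pin:pin + Ns] += sw * bh             # what each frame contributes after windowing
print(np.allclose(ola[Ns:4 * Ns], 1.0))      # True: constant gain in the steady-state region
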
Example #17
def blackmanharris(dataset, **kwargs):
    """
    Calculate a minimum 4-term Blackman-Harris apodization.

    For multidimensional NDDataset,
    the apodization is by default performed on the last dimension.

    The data in the last dimension MUST be time-domain or dimensionless,
    otherwise an error is raised.


    Parameters
    ----------
    dataset : dataset
        Input dataset.

    Returns
    -------
    apodized
        dataset.
    apod_arr
        The apodization array only if 'retapod' is True.

    Other Parameters
    ----------------
    dim : str or int, keyword parameter, optional, default='x'
        Specify on which dimension to apply the apodization method. If `dim` is specified as an integer it is equivalent
        to the usual `axis` numpy parameter.
    inv : bool, keyword parameter, optional, default=False
        True for inverse apodization.
    rev : bool, keyword parameter, optional, default=False
        True to reverse the apodization before applying it to the data.
    inplace : bool, keyword parameter, optional, default=False
        True if we make the transform inplace.  If False, the function returns a new dataset.
    retapod : bool, keyword parameter, optional, default=False
        True to return the apodization array along with the apodized object.

    """

    x = dataset

    return x * windows.blackmanharris(len(x), sym=True)
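
A minimal usage sketch with a plain 1-D array standing in for the NDDataset (the real spectrochempy routine also handles the dim/inv/rev/inplace/retapod keywords and the dimensionality checks described above):

import numpy as np
from scipy.signal import windows

n = np.arange(512)
fid = np.exp(-n / 128.0) * np.cos(2 * np.pi * 0.05 * n)    # toy time-domain signal
apodized = fid * windows.blackmanharris(len(fid), sym=True)
print(apodized[0], apodized[-1])                           # both ends tapered to ~6e-5 of the raw values
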
Example #18
def sineModelSynth(tfreq, tmag, tphase, N, H, fs):
    """
	Synthesis of a sound using the sinusoidal model
	tfreq,tmag,tphase: frequencies, magnitudes and phases of sinusoids
	N: synthesis FFT size, H: hop size, fs: sampling rate
	returns y: output array sound
	"""

    hN = N // 2  # half of FFT size for synthesis
    L = tfreq.shape[0]  # number of frames
    pout = 0  # initialize output sound pointer
    ysize = H * (L + 3)  # output sound size
    y = np.zeros(ysize)  # initialize output array
    sw = np.zeros(N)  # initialize synthesis window
    ow = triang(2 * H)  # triangular window
    sw[hN - H:hN + H] = ow  # add triangular window
    bh = blackmanharris(N)  # blackmanharris window
    bh = bh / sum(bh)  # normalized blackmanharris window
    sw[hN - H:hN +
       H] = sw[hN - H:hN + H] / bh[hN - H:hN +
                                   H]  # normalized synthesis window
    lastytfreq = tfreq[0, :]  # initialize synthesis frequencies
    ytphase = 2 * np.pi * np.random.rand(
        tfreq[0, :].size)  # initialize synthesis phases
    for l in range(L):  # iterate over all frames
        if (tphase.size > 0):  # if no phases generate them
            ytphase = tphase[l, :]
        else:
            ytphase += (np.pi * (lastytfreq + tfreq[l, :]) /
                        fs) * H  # propagate phases
        Y = UF.genSpecSines(tfreq[l, :], tmag[l, :], ytphase, N,
                            fs)  # generate sines in the spectrum
        lastytfreq = tfreq[l, :]  # save frequency for phase propagation
        ytphase = ytphase % (2 * np.pi)  # make phase inside 2*pi
        yw = np.real(fftshift(ifft(Y)))  # compute inverse FFT
        y[pout:pout + N] += sw * yw  # overlap-add and apply a synthesis window
        pout += H  # advance sound pointer
    y = np.delete(y, range(hN))  # delete half of first window
    y = np.delete(y, range(y.size - hN,
                           y.size))  # delete half of the last window
    return y
Example #19
def get_fft_window(window_type, window_length):
    # Generate the window with the right number of points
    window = None
    if window_type == "Bartlett":
        window = windows.bartlett(window_length)
    if window_type == "Blackman":
        window = windows.blackman(window_length)
    if window_type == "Blackman Harris":
        window = windows.blackmanharris(window_length)
    if window_type == "Flat Top":
        window = windows.flattop(window_length)
    if window_type == "Hamming":
        window = windows.hamming(window_length)
    if window_type == "Hanning":
        window = windows.hann(window_length)

    # If no window matched, use a rectangular window
    if window is None:
        window = np.ones(window_length)

    # Return the window
    return window
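
An equivalent table-driven sketch of get_fft_window, with the same rectangular fallback; the behavior is unchanged, the if-chain just becomes a dictionary lookup:

import numpy as np
from scipy.signal import windows

_WINDOWS = {
    "Bartlett": windows.bartlett,
    "Blackman": windows.blackman,
    "Blackman Harris": windows.blackmanharris,
    "Flat Top": windows.flattop,
    "Hamming": windows.hamming,
    "Hanning": windows.hann,
}

def get_fft_window_by_lookup(window_type, window_length):
    return _WINDOWS.get(window_type, np.ones)(window_length)
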
Example #20
def sineSubtraction(x, N, H, sfreq, smag, sphase, fs):
    """
	Subtract sinusoids from a sound
	x: input sound, N: fft-size, H: hop-size
	sfreq, smag, sphase: sinusoidal frequencies, magnitudes and phases
	returns xr: residual sound 
	"""

    hN = N // 2  # half of fft size (integer, used for zero-padding and slicing)
    x = np.append(
        np.zeros(hN),
        x)  # add zeros at beginning to center first window at sample 0
    x = np.append(x,
                  np.zeros(hN))  # add zeros at the end to analyze last sample
    bh = blackmanharris(N)  # blackman harris window
    w = bh / sum(bh)  # normalize window
    sw = np.zeros(N)  # initialize synthesis window
    sw[hN - H:hN + H] = triang(2 * H) / w[hN - H:hN + H]  # synthesis window
    L = sfreq.shape[0]  # number of frames, this works if no sines
    xr = np.zeros(x.size)  # initialize output array
    pin = 0
    for l in range(L):
        xw = x[pin:pin + N] * w  # window the input sound
        X = fft(fftshift(xw))  # compute FFT
        Yh = UF_C.genSpecSines(N * sfreq[l, :] / fs, smag[l, :], sphase[l, :],
                               N)  # generate spec sines
        Xr = X - Yh  # subtract sines from original spectrum
        xrw = np.real(fftshift(ifft(Xr)))  # inverse FFT
        xr[pin:pin + N] += xrw * sw  # overlap-add
        pin += H  # advance sound pointer
    xr = np.delete(
        xr,
        range(hN))  # delete half of first window which was added in stftAnal
    xr = np.delete(xr, range(
        xr.size - hN,
        xr.size))  # delete half of last window which was added in stftAnal
    return xr
"""
f = fft(data_x[0][0])
g = fftfreq(n=data_x[0][0].size, d=1/sampling_freq)
print(f.shape)
print(g.shape)
"""

# Window-function processing
N = extraction_freq  # number of samples
# Hanning window
#hanning_window = windows.hann(N)
hanning_window = np.hanning(N)
# Hamming window
hamming_window = windows.hamming(N)
# Blackman-Harris window
black_window   = windows.blackmanharris(N)

"""
plt.plot(x,hanning_window)
plt.plot(x,hamming_window)
plt.plot(x,black_window)
plt.show()
"""

a = 0
plt.plot(x,data_x[0][a])
plt.show()

data_x *= hanning_window
#data_x[:][:] /= np.amax(data_x[0][a])
Example #22
from scipy.io import arff
import pandas as pd
import librosa as lib
from scipy.signal import windows as win
from collections import namedtuple
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelEncoder
import os, glob

window = win.blackmanharris(2048)
folder_length = 89
root_folder = '../Musically-Motivated-CNN/data/'
steps_back = '../../'

LabeledChunk = namedtuple('Chunk', ['label', 'spectrogram'])
genres = [
    'blues', 'classical', 'country', 'disco', 'hiphop', 'jazz', 'metal', 'pop',
    'reggae', 'rock'
]
chunk_len = 80


def extract_labeled_spectrogram():

    labeled_dataset = []

    print('starting feature extraction')

    for genre in genres:
        os.chdir(root_folder + genre)
        for file in glob.glob('*.au'):
Example #23
    def _update_canvas(self):
        """
        Update the figure when the user changes an input value
        :return:
        """
        # Get the parameters from the form
        number_of_steps = int(self.number_of_steps.text())
        frequency_step = float(self.frequency_step.text())
        prf = float(self.prf.text())
        target_range = self.target_range.text().split(',')
        target_rcs = self.target_rcs.text().split(',')
        target_velocity = self.target_velocity.text().split(',')

        t_range = [float(r) for r in target_range]
        t_rcs = [float(r) for r in target_rcs]
        t_velocity = [float(v) for v in target_velocity]

        # Get the selected window from the form
        window_type = self.window_type.currentText()

        if window_type == 'Kaiser':
            coefficients = kaiser(number_of_steps, 6, True)
        elif window_type == 'Blackman-Harris':
            coefficients = blackmanharris(number_of_steps, True)
        elif window_type == 'Hanning':
            coefficients = hanning(number_of_steps, True)
        elif window_type == 'Hamming':
            coefficients = hamming(number_of_steps, True)
        elif window_type == 'Rectangular':
            coefficients = ones(number_of_steps)

        # Calculate the base band return signal
        s = zeros(number_of_steps, dtype=complex)

        for rng, rcs, v in zip(t_range, t_rcs, t_velocity):
            s += [sqrt(rcs) * exp(-1j * 4.0 * pi / c * (i * frequency_step) * (rng - v * (i / prf)))
                  for i in range(number_of_steps)]

            n = next_fast_len(10 * number_of_steps)
            sf = ifft(s * coefficients, n) * float(n) / float(number_of_steps)

        # range_resolution = c / (2.0 * number_of_steps * frequency_step)
        range_unambiguous = c / (2.0 * frequency_step)

        range_window = linspace(0, range_unambiguous, n)

        # Clear the axes for the updated plot
        self.axes1.clear()

        # Create the line plot
        self.axes1.plot(range_window, 20.0 * log10(abs(sf) + finfo(float).eps), '')

        # Set the x and y axis labels
        self.axes1.set_xlabel("Range (m)", size=12)
        self.axes1.set_ylabel("Amplitude (dBsm)", size=12)

        # Turn on the grid
        self.axes1.grid(linestyle=':', linewidth=0.5)

        # Set the plot title and labels
        self.axes1.set_title('Stepped Frequency Range Profile', size=14)

        # Set the tick label size
        self.axes1.tick_params(labelsize=12)

        # Update the canvas
        self.my_canvas.draw()
Example #24
    def _update_canvas(self):
        """
        Update the figure when the user changes an input value
        :return:
        """
        # Get the parameters from the form
        bandwidth = float(self.bandwidth.text())
        pulsewidth = float(self.pulsewidth.text())
        target_range = self.target_range.text().split(',')
        target_rcs = self.target_rcs.text().split(',')

        t_range = [float(r) for r in target_range]
        t_rcs = [float(r) for r in target_rcs]

        # Get the selected window from the form
        window_type = self.window_type.currentText()

        # Number of samples
        N = int(2 * bandwidth * pulsewidth) * 8

        if window_type == 'Kaiser':
            coefficients = kaiser(N, 6, True)
        elif window_type == 'Blackman-Harris':
            coefficients = blackmanharris(N, True)
        elif window_type == 'Hanning':
            coefficients = hanning(N, True)
        elif window_type == 'Hamming':
            coefficients = hamming(N, True)
        elif window_type == 'Rectangular':
            coefficients = ones(N)

        # Set up the time vector
        t = linspace(-0.5 * pulsewidth, 0.5 * pulsewidth, N)

        # Calculate the baseband return signal
        s = zeros(N, dtype=complex)

        # Chirp slope
        alpha = 0.5 * bandwidth / pulsewidth

        for r, rcs in zip(t_range, t_rcs):
            s += sqrt(rcs) * exp(1j * 2.0 * pi * alpha * (t - 2.0 * r / c)**2)

        # Transmit signal
        st = exp(1j * 2 * pi * alpha * t**2)

        # Impulse response and matched filtering
        Hf = fft(conj(st * coefficients))
        Si = fft(s)
        so = fftshift(ifft(Si * Hf))

        # Range window
        range_window = linspace(-0.25 * c * pulsewidth, 0.25 * c * pulsewidth,
                                N)

        # Clear the axes for the updated plot
        self.axes1.clear()

        # Create the line plot
        self.axes1.plot(range_window,
                        20.0 * log10(abs(so) / N + finfo(float).eps), '')
        self.axes1.set_xlim(0, max(t_range) + 100)
        self.axes1.set_ylim(-60, max(20.0 * log10(abs(so) / N)) + 10)

        # Set the x and y axis labels
        self.axes1.set_xlabel("Range (m)", size=12)
        self.axes1.set_ylabel("Amplitude (dBsm)", size=12)

        # Turn on the grid
        self.axes1.grid(linestyle=':', linewidth=0.5)

        # Set the plot title and labels
        self.axes1.set_title('Matched Filter Range Profile', size=14)

        # Set the tick label size
        self.axes1.tick_params(labelsize=12)

        # Update the canvas
        self.my_canvas.draw()
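
A small self-contained illustration of why the matched-filter reference is amplitude-weighted above: tapering the reference of an LFM pulse widens the compressed main lobe but lowers the range sidelobes by tens of dB (numbers here are indicative only):

import numpy as np
from numpy.fft import fft, ifft, fftshift
from scipy.signal import windows

B, T, N = 50e6, 10e-6, 4096
t = np.linspace(-0.5 * T, 0.5 * T, N)
st = np.exp(1j * np.pi * (B / T) * t ** 2)               # unit-amplitude LFM chirp

for name, w in [('rectangular', np.ones(N)),
                ('blackman-harris', windows.blackmanharris(N))]:
    so = np.abs(fftshift(ifft(fft(st) * np.conj(fft(st * w)))))
    center = int(np.argmax(so))
    mask = np.ones(N, dtype=bool)
    mask[center - 64:center + 64] = False                # exclude the (window-dependent) main lobe
    print(name, round(float(20 * np.log10(so[mask].max() / so.max())), 1))
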
Example #25
File: pyDSP.py  Project: amckenna41/pySAR
    def pre_processing(self):
        """
        Complete various pre-processing steps for encoded protein sequences before
        doing any of the DSP-related functions or transformations. Zero-pad
        the sequences, remove any +/- infinity or NAN values, get the approximate
        protein spectra and window function parameter names.

        Parameters
        ----------
        :self (PyDSP object): 
            instance of PyDSP class.
            
        Returns
        -------
        None

        """
        #zero-pad encoded sequences so they are all the same length
        self.protein_seqs = zero_padding(self.protein_seqs)

        #get shape parameters of proteins seqs
        self.num_seqs = self.protein_seqs.shape[0]
        self.signal_len = self.protein_seqs.shape[1]

        #replace any positive or negative infinity or NAN values with 0
        self.protein_seqs[self.protein_seqs == -np.inf] = 0
        self.protein_seqs[self.protein_seqs == np.inf] = 0
        self.protein_seqs[np.isnan(self.protein_seqs)] = 0

        #replace any NAN's with 0's
        #self.protein_seqs.fillna(0, inplace=True)
        self.protein_seqs = np.nan_to_num(self.protein_seqs)

        #initialise zeros array to store all protein spectra
        self.fft_power = np.zeros((self.num_seqs, self.signal_len))
        self.fft_real = np.zeros((self.num_seqs, self.signal_len))
        self.fft_imag = np.zeros((self.num_seqs, self.signal_len))
        self.fft_abs = np.zeros((self.num_seqs, self.signal_len))

        #list of accepted spectra, window functions and filters
        all_spectra = ['power', 'absolute', 'real', 'imaginary']
        all_windows = [
            'hamming', 'blackman', 'blackmanharris', 'gaussian', 'bartlett',
            'kaiser', 'barthann', 'bohman', 'chebwin', 'cosine', 'exponential',
            'flattop', 'hann', 'boxcar', 'hanning', 'nuttall', 'parzen',
            'triang', 'tukey'
        ]
        all_filters = [
            'savgol', 'medfilt', 'symiirorder1', 'lfilter', 'hilbert'
        ]

        #set required input parameters, raise error if spectrum is none
        if self.spectrum == None:
            raise ValueError(
                'Invalid input Spectrum type ({}) not available in valid spectra: {}'
                .format(self.spectrum, all_spectra))
        else:
            #get closest correct spectra from user input, if no close match then raise error
            spectra_matches = (get_close_matches(self.spectrum,
                                                 all_spectra,
                                                 cutoff=0.4))

            if spectra_matches == []:
                raise ValueError(
                    'Invalid input Spectrum type ({}) not available in valid spectra: {}'
                    .format(self.spectrum, all_spectra))
            else:
                self.spectra = spectra_matches[0]  #closest match in array

        if self.window_type == None:
            self.window = 1  #window = 1 is the same as applying no window
        else:
            #get closest correct window function from user input
            window_matches = (get_close_matches(self.window,
                                                all_windows,
                                                cutoff=0.4))

            #check if sym=True or sym=False
            #get window function specified by window input parameter, if no match then window = 1
            if window_matches != []:
                if window_matches[0] == 'hamming':
                    self.window = hamming(self.signal_len, sym=True)
                    self.window_type = "hamming"
                elif window_matches[0] == "blackman":
                    self.window = blackman(self.signal_len, sym=True)
                    self.window = "blackman"
                elif window_matches[0] == "blackmanharris":
                    self.window = blackmanharris(self.signal_len,
                                                 sym=True)  #**
                    self.window_type = "blackmanharris"
                elif window_matches[0] == "bartlett":
                    self.window = bartlett(self.signal_len, sym=True)
                    self.window_type = "bartlett"
                elif window_matches[0] == "gaussian":
                    self.window = gaussian(self.signal_len, std=7, sym=True)
                    self.window_type = "gaussian"
                elif window_matches[0] == "kaiser":
                    self.window = kaiser(self.signal_len, beta=14, sym=True)
                    self.window_type = "kaiser"
                elif window_matches[0] == "hanning":
                    self.window = hanning(self.signal_len, sym=True)
                    self.window_type = "hanning"
                elif window_matches[0] == "barthann":
                    self.window = barthann(self.signal_len, sym=True)
                    self.window_type = "barthann"
                elif window_matches[0] == "bohman":
                    self.window = bohman(self.signal_len, sym=True)
                    self.window_type = "bohman"
                elif window_matches[0] == "chebwin":
                    self.window = chebwin(self.signal_len, at=100, sym=True)  # chebwin requires an attenuation; 100 dB assumed here
                    self.window_type = "chebwin"
                elif window_matches[0] == "cosine":
                    self.window = cosine(self.signal_len, sym=True)
                    self.window_type = "cosine"
                elif window_matches[0] == "exponential":
                    self.window = exponential(self.signal_len, sym=True)
                    self.window_type = "exponential"
                elif window_matches[0] == "flattop":
                    self.window = flattop(self.signal_len, sym=True)
                    self.window_type = "flattop"
                elif window_matches[0] == "boxcar":
                    self.window = boxcar(self.signal_len, sym=True)
                    self.window_type = "boxcar"
                elif window_matches[0] == "nuttall":
                    self.window = nuttall(self.signal_len, sym=True)
                    self.window_type = "nuttall"
                elif window_matches[0] == "parzen":
                    self.window = parzen(self.signal_len, sym=True)
                    self.window_type = "parzen"
                elif window_matches[0] == "triang":
                    self.window = triang(self.signal_len, sym=True)
                    self.window_type = "triang"
                elif window_matches[0] == "tukey":
                    self.window = tukey(self.signal_len, sym=True)
                    self.window_type = "tukey"

            else:
                self.window = 1  #window = 1 is the same as applying no window

        #calculate convolution from protein sequences
        if self.convolution is not None:
            if self.window is not None:
                self.convoled_seqs = signal.convolve(
                    self.protein_seqs, self.window, mode='same') / sum(
                        self.window)

        if self.filter != None:
            #get closest correct filter from user input
            filter_matches = (get_close_matches(self.filter,
                                                all_filters,
                                                cutoff=0.4))

            #set filter attribute according to approximate user input
            if filter_matches != []:
                if filter_matches[0] == 'savgol':
                    self.filter = savgol_filter(self.signal_len,
                                                self.signal_len)
                elif filter_matches[0] == 'medfilt':
                    self.filter = medfilt(self.signal_len)
                elif filter_matches[0] == 'symiirorder1':
                    self.filter = symiirorder1(self.signal_len, c0=1, z1=1)
                elif filter_matches[0] == 'lfilter':
                    self.filter = lfilter(self.signal_len)
                elif filter_matches[0] == 'hilbert':
                    self.filter = hilbert(self.signal_len)
            else:
                self.filter = ""  #no filter
Example #26
    def test_reflection_gains(self):
        # introduce a cable reflection into the autocorrelation
        gains = sigchain.gen_reflection_gains(self.freqs, [0],
                                              amp=[1e-1],
                                              dly=[300],
                                              phs=[1])
        outvis = sigchain.apply_gains(self.vis, gains, [0, 0])
        ovfft = np.fft.fft(outvis *
                           windows.blackmanharris(len(self.freqs))[None, :],
                           axis=1)

        # assert reflection is at +300 ns and check its amplitude
        select = self.dlys > 200
        nt.assert_almost_equal(
            self.dlys[select][np.argmax(
                np.mean(np.abs(ovfft), axis=0)[select])], 300)
        select = np.argmin(np.abs(self.dlys - 300))
        m = np.mean(np.abs(ovfft), axis=0)
        nt.assert_true(np.isclose(m[select] / m[0], 1e-1, atol=1e-2))

        # assert also reflection at -300 ns
        select = self.dlys < -200
        nt.assert_almost_equal(
            self.dlys[select][np.argmax(
                np.mean(np.abs(ovfft), axis=0)[select])], -300)
        select = np.argmin(np.abs(self.dlys - -300))
        m = np.mean(np.abs(ovfft), axis=0)
        nt.assert_true(np.isclose(m[select] / m[0], 1e-1, atol=1e-2))

        # test reshaping into Ntimes
        amp = np.linspace(1e-2, 1e-3, 3)
        gains = sigchain.gen_reflection_gains(self.freqs, [0],
                                              amp=[amp],
                                              dly=[300],
                                              phs=[1])
        nt.assert_equal(gains[0].shape, (3, 100))

        # test frequency evolution with one time
        amp = np.linspace(1e-2, 1e-3, 100).reshape(1, -1)
        gains = sigchain.gen_reflection_gains(self.freqs, [0],
                                              amp=[amp],
                                              dly=[300],
                                              phs=[1])
        nt.assert_equal(gains[0].shape, (1, 100))
        # now test with multiple times
        amp = np.repeat(np.linspace(1e-2, 1e-3, 100).reshape(1, -1),
                        10,
                        axis=0)
        gains = sigchain.gen_reflection_gains(self.freqs, [0],
                                              amp=[amp],
                                              dly=[300],
                                              phs=[1])
        nt.assert_equal(gains[0].shape, (10, 100))

        # exception
        amp = np.linspace(1e-2, 1e-3, 2).reshape(1, -1)
        nt.assert_raises(AssertionError,
                         sigchain.gen_reflection_gains,
                         self.freqs, [0],
                         amp=[amp],
                         dly=[300],
                         phs=[1])
Example #27
    def _update_canvas(self):
        """
        Update the figure when the user changes an input value
        :return:
        """
        # Get the parameters from the form
        bandwidth = float(self.bandwidth.text())
        pulsewidth = float(self.pulsewidth.text())
        range_window_length = float(self.range_window_length.text())
        target_range = self.target_range.text().split(',')
        target_rcs = self.target_rcs.text().split(',')

        t_range = [float(r) for r in target_range]
        t_rcs = [float(r) for r in target_rcs]

        # Get the selected window from the form
        window_type = self.window_type.currentText()

        # Number of samples
        number_of_samples = int(ceil(4 * bandwidth * range_window_length / c))

        if window_type == 'Kaiser':
            coefficients = kaiser(number_of_samples, 6, True)
        elif window_type == 'Blackman-Harris':
            coefficients = blackmanharris(number_of_samples, True)
        elif window_type == 'Hanning':
            coefficients = hanning(number_of_samples, True)
        elif window_type == 'Hamming':
            coefficients = hamming(number_of_samples, True)
        elif window_type == 'Rectangular':
            coefficients = ones(number_of_samples)

        # Time sampling
        t, dt = linspace(-0.5 * pulsewidth,
                         0.5 * pulsewidth,
                         number_of_samples,
                         retstep=True)

        # Sampled signal after mixing
        so = zeros(number_of_samples, dtype=complex)
        for r, rcs in zip(t_range, t_rcs):
            so += sqrt(rcs) * exp(1j * 2.0 * pi * bandwidth / pulsewidth *
                                  (2 * r / c) * t)

        # Fourier transform
        so = fftshift(fft(so * coefficients, 4 * number_of_samples))

        # FFT frequencies
        frequencies = fftshift(fftfreq(4 * number_of_samples, dt))

        # Range window
        range_window = 0.5 * frequencies * c * pulsewidth / bandwidth

        # Clear the axes for the updated plot
        self.axes1.clear()

        # Create the line plot
        self.axes1.plot(
            range_window,
            20.0 * log10(abs(so) / number_of_samples + finfo(float).eps), '')
        self.axes1.set_xlim(min(t_range) - 5, max(t_range) + 5)
        self.axes1.set_ylim(
            -60,
            max(20.0 * log10(abs(so) / number_of_samples)) + 10)

        # Set the x and y axis labels
        self.axes1.set_xlabel("Range (m)", size=12)
        self.axes1.set_ylabel("Amplitude (dBsm)", size=12)

        # Turn on the grid
        self.axes1.grid(linestyle=':', linewidth=0.5)

        # Set the plot title and labels
        self.axes1.set_title('Stretch Processor Range Profile', size=14)

        # Set the tick label size
        self.axes1.tick_params(labelsize=12)

        # Update the canvas
        self.my_canvas.draw()