# NOTE: removed GitHub page-scrape artifacts (navigation header, file-stats banner,
# and rendered line-number gutter) that preceded the module docstring and made the
# file invalid Python.
"""Code to extract audio features
References:
[1] Haytham Fayek's blog post on "Speech Processing for Machine Learning"
url: http://haythamfayek.com/2016/04/21/speech-processing-for-machine-learning.html
[2] Theodoros Giannakopoulos' "pyAudioAnalysis: An Open-Source Python Library for Audio Signal Analysis"
url: https://github.com/tyiannak/pyAudioAnalysis
"""
import numpy as np # matrix math
from scipy.io import wavfile # reading the wavfile
from scipy.fftpack import dct
from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import audioFeatureExtraction
from mel_features import log_mel_spectrogram
# 40 dim features
def extract_logmel(path_file,
                   frame_size=25e-3,
                   frame_stride=10e-3,
                   NFFT=512,
                   nfilt=40,
                   normalize=True):
    """Extract log mel-filterbank energies from a wav file.

    Follows the pipeline from Haytham Fayek's tutorial [1]: pre-emphasis,
    framing, Hamming windowing, power spectrum, mel filterbank, log (dB).

    Inputs:  path_file: the path to the audio (wav) file
             frame_size: frame size to use in seconds. default=25ms
             frame_stride: frame stride to use in seconds. default=10ms
             NFFT: n-point FFT. default=512
             nfilt: number of mel-filter banks to apply. default=40
             normalize: whether to return mean-normalized coefficients. default=True
    Outputs: filter_banks: (num_frames, nfilt) mel filter bank coefficients in dB
    """
    sample_rate, signal = wavfile.read(path_file)
    # Pre-emphasis boosts high frequencies: y[t] = x[t] - 0.97 * x[t-1]
    pre_emphasis = 0.97
    emphasized_signal = np.append(signal[0], signal[1:] - pre_emphasis * signal[:-1])
    frame_length, frame_step = frame_size * sample_rate, frame_stride * sample_rate  # Convert from seconds to samples
    signal_length = len(emphasized_signal)
    frame_length = int(round(frame_length))
    frame_step = int(round(frame_step))
    num_frames = int(np.ceil(float(np.abs(signal_length - frame_length)) / frame_step))  # Make sure that we have at least 1 frame
    pad_signal_length = num_frames * frame_step + frame_length
    z = np.zeros((pad_signal_length - signal_length))
    # Pad Signal to make sure that all frames have equal number of samples without
    # truncating any samples from the original signal
    pad_signal = np.append(emphasized_signal, z)
    # indices[i, j] = sample index of the j-th sample in the i-th frame
    indices = np.tile(np.arange(0, frame_length), (num_frames, 1)) \
        + np.tile(np.arange(0, num_frames * frame_step, frame_step), (frame_length, 1)).T
    frames = pad_signal[indices.astype(np.int32, copy=False)]
    # Hamming window reduces spectral leakage at the frame edges
    frames *= np.hamming(frame_length)
    mag_frames = np.absolute(np.fft.rfft(frames, NFFT))  # Magnitude of the FFT
    pow_frames = ((1.0 / NFFT) * ((mag_frames) ** 2))  # Power Spectrum
    # Build a triangular mel filterbank: nfilt filters equally spaced on the mel scale
    low_freq_mel = 0
    high_freq_mel = (2595 * np.log10(1 + (sample_rate / 2) / 700))  # Convert Hz to Mel
    mel_points = np.linspace(low_freq_mel, high_freq_mel, nfilt + 2)  # Equally spaced in Mel scale
    hz_points = (700 * (10**(mel_points / 2595) - 1))  # Convert Mel to Hz
    # FFT-bin index of each filter edge (renamed from `bin` to stop shadowing the builtin)
    bin_edges = np.floor((NFFT + 1) * hz_points / sample_rate)
    fbank = np.zeros((nfilt, int(np.floor(NFFT / 2 + 1))))
    for m in range(1, nfilt + 1):
        f_m_minus = int(bin_edges[m - 1])  # left
        f_m = int(bin_edges[m])            # center
        f_m_plus = int(bin_edges[m + 1])   # right
        # Rising slope of triangle m, then falling slope
        for k in range(f_m_minus, f_m):
            fbank[m - 1, k] = (k - bin_edges[m - 1]) / (bin_edges[m] - bin_edges[m - 1])
        for k in range(f_m, f_m_plus):
            fbank[m - 1, k] = (bin_edges[m + 1] - k) / (bin_edges[m + 1] - bin_edges[m])
    filter_banks = np.dot(pow_frames, fbank.T)
    # Replace exact zeros with machine epsilon before taking the log (numerical stability)
    filter_banks = np.where(filter_banks == 0, np.finfo(float).eps, filter_banks)
    filter_banks = 20 * np.log10(filter_banks)  # dB
    if normalize:
        # Per-coefficient mean normalization across frames
        filter_banks -= (np.mean(filter_banks, axis=0) + 1e-8)
    return filter_banks
# 12 dim features
def extract_mfcc(path_file,
                 frame_size=25e-3,
                 frame_stride=10e-3,
                 NFFT=512,
                 nfilt=40,
                 num_ceps=12,
                 normalize=True):
    """Extract MFCCs by applying a DCT to unnormalized logmel features.

    Inputs:  path_file: the path to the audio (wav) file
             frame_size: frame size to use in seconds. default=25ms
             frame_stride: frame stride to use in seconds. default=10ms
             NFFT: n-point FFT. default=512
             nfilt: number of mel-filter banks to apply. default=40
             num_ceps: number of cepstral coefficients to return. default=12
             normalize: whether to return mean-normalized coefficients. default=True
    Outputs: mfcc: (num_frames, num_ceps) Mel frequency cepstral coefficients
    """
    # BUG FIX: forward the caller's arguments instead of hard-coded defaults,
    # so frame_size / frame_stride / NFFT / nfilt actually take effect.
    filter_banks = extract_logmel(path_file,
                                  frame_size=frame_size,
                                  frame_stride=frame_stride,
                                  NFFT=NFFT,
                                  nfilt=nfilt,
                                  normalize=False)
    # DCT decorrelates the filterbank energies; drop c0, keep coefficients 2..num_ceps+1
    mfcc = dct(filter_banks, type=2, axis=1, norm='ortho')[:, 1 : (num_ceps + 1)]
    # Sinusoidal liftering de-emphasizes the higher cepstral coefficients
    cep_lifter = 22
    (nframes, ncoeff) = mfcc.shape
    n = np.arange(ncoeff)
    lift = 1 + (cep_lifter / 2) * np.sin(np.pi * n / cep_lifter)
    mfcc *= lift
    if normalize:
        # Per-coefficient mean normalization across frames
        mfcc -= (np.mean(mfcc, axis=0) + 1e-8)
    return mfcc
# 62 dim
def extract_features(path_file,
                     frame_size=25e-3,
                     frame_stride=10e-3):
    """Combine logmel and frame-level short-term (ST) features
    extracted using the pyAudioAnalysis library.

    Inputs:  path_file: the path to the audio file
             frame_size: frame size to use in seconds. default=25ms
             frame_stride: frame stride to use in seconds. default=10ms
    Output:  (num_frames, 62) array: 22 ST + 40 logmel features,
             mean-normalized per column
    """
    [sample_rate, signal] = audioBasicIO.readAudioFile(path_file)
    # Convert frame size/stride from seconds to samples
    frame_length = int(round(frame_size * sample_rate))
    frame_step = int(round(frame_stride * sample_rate))
    st_features = audioFeatureExtraction.stFeatureExtraction(signal,
                                                             sample_rate,
                                                             frame_length,
                                                             frame_step)
    # BUG FIX: forward the caller's frame parameters instead of hard-coded
    # defaults, so both feature sets are computed with the same framing.
    filter_banks = extract_logmel(path_file,
                                  frame_size=frame_size,
                                  frame_stride=frame_stride,
                                  normalize=False)
    st_features = np.transpose(st_features)  # transpose to make frame_count the x-axis
    # Drop pyAudioAnalysis' own MFCC columns (indices 8..20); logmel replaces them
    st_features = np.delete(st_features, np.s_[8:21], axis=1)
    # The two extractors can disagree by one trailing frame; trim ST to align
    if st_features.shape[0] - filter_banks.shape[0] == 1:
        st_features = st_features[:-1, :]
    features = np.c_[st_features, filter_banks]
    # Per-column mean normalization across frames
    features -= (np.mean(features, axis=0) + 1e-8)
    return features
# 34 dim
# description: https://github.com/tyiannak/pyAudioAnalysis/wiki/3.-Feature-Extraction
def extract_stfeatures(path_file,
                       frame_size=25e-3,
                       frame_stride=10e-3):
    """Extract the 34-dim short-term (ST) feature set, MFCCs included,
    using the pyAudioAnalysis library.

    Inputs:  path_file: the path to the audio file
             frame_size: frame size to use in seconds. default=25ms
             frame_stride: frame stride to use in seconds. default=10ms
    Output:  (num_frames, 34) array of ST features, one row per frame
    """
    sample_rate, signal = audioBasicIO.readAudioFile(path_file)
    # Seconds -> samples for the ST extractor's window/step arguments
    window_samples = int(round(frame_size * sample_rate))
    step_samples = int(round(frame_stride * sample_rate))
    raw = audioFeatureExtraction.stFeatureExtraction(signal,
                                                     sample_rate,
                                                     window_samples,
                                                     step_samples)
    # pyAudioAnalysis returns features along axis 0; put frames on axis 0 instead
    return np.transpose(raw)
# 40 dim
def extract_alt_logmel(path_file,
                       frame_size=0.025,
                       frame_stride=0.010,
                       normalize=True):
    """Extract logmel features using the logmel feature extraction code
    included in the Google AudioSet (vggish) repository. Main difference
    from extract_logmel is that it uses a Hann window instead of a
    Hamming window.

    Inputs:  path_file: the path to the audio (wav) file
             frame_size: window length in seconds. default=25ms
             frame_stride: hop length in seconds. default=10ms
             normalize: whether to return mean-normalized coefficients. default=True
    Outputs: filter_banks: 2-D log mel spectrogram, frames along axis 0
             (number of mel bins is whatever log_mel_spectrogram defaults to;
             presumably 40 per the "# 40 dim" note — confirm in mel_features)
    """
    sample_rate, signal = wavfile.read(path_file)
    filter_banks = log_mel_spectrogram(signal,
                                       audio_sample_rate=sample_rate,
                                       log_offset=0.0,
                                       window_length_secs=frame_size,
                                       hop_length_secs=frame_stride)
    if normalize:
        # Per-coefficient mean normalization across frames
        filter_banks -= (np.mean(filter_banks, axis=0) + 1e-8)
    return filter_banks