Example #1
import copy
import numpy as np
from statsmodels.tsa import stattools
import preprocessing


def acf_fundamental_freq(x, fmin, fmax, fs):
    y = copy.copy(x)
    # fmax + 100 leaves some headroom because the low-pass filter is not ideal
    y = preprocessing.avoid_overlap(y, N=100, f=fmax + 100, fs=fs, plot=False)
    # time_series = preprocessing.downsample(time_series, fs, 4410)
    nmin = int(fs / fmax)
    nmax = int(fs / fmin)
    acf = stattools.acf(y, nlags=nmax)
    # acf is sliced from lag nmin, so add nmin back to recover the true peak lag
    f0 = fs / (np.argmax(acf[nmin:]) + nmin)
    return f0
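A minimal self-contained check of the same ACF peak search on a synthetic tone; the anti-aliasing filter is skipped since the synthetic signal is already band-limited, and all values below are illustrative only:

import numpy as np
from statsmodels.tsa import stattools

fs = 22050
t = np.arange(0, 0.5, 1 / fs)
x = np.sin(2 * np.pi * 220 * t)              # synthetic 220 Hz tone

fmin, fmax = 100, 1000
nmin, nmax = int(fs / fmax), int(fs / fmin)  # lag range covering fmin..fmax
acf = stattools.acf(x, nlags=nmax)
lag = np.argmax(acf[nmin:]) + nmin           # strongest peak away from lag 0
print(fs / lag)                              # ~220 Hz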
Example #2
sample_num = []  # number of samples per label
for j in range(len(labelname)):
    subpath = path + '\\' + labelname[j]
    subfilename = os.listdir(subpath)  # list the audio files for this label
    sample_num.append(len(subfilename))  # file count, which is also the sample count
    for i in range(len(subfilename)):
        audio_data, sample_rate = librosa.load(subpath + '\\' + subfilename[i],
                                               sr=None,
                                               mono=True,
                                               res_type='kaiser_best')  # load the audio file
        # pre_amphasis = preprocessing.pre_emphasis(audio_data, 0.97, pic=savepic + '\\' + 'pre_amphasis_'+str(j)+'_'+str(i))
        pre_amphasis = audio_data
        avoid_overlap = preprocessing.avoid_overlap(pre_amphasis,
                                                    N=10,
                                                    f=4000,
                                                    fs=sample_rate,
                                                    plot=False)
        downsample = preprocessing.downsample(avoid_overlap, sample_rate,
                                              downsample_rate)
        silence_remove = preprocessing.silence_remove(
            downsample,
            limit=np.max(downsample) / 20,
            option='hilbert',
            # pic=savepic + '\\' + 'silence_remove_hilbert_' + str(j)+'_'+str(i)
            pic=None)
        # _, _, _ = visual.stft_specgram(silence_remove, picname=savepic + '\\' + 'stft_'+str(j)+'_'+str(i), fs=downsample_rate,
        #                                nperseg=512, noverlap=128, nfft=1024)
        buffer = [j] + list(silence_remove)

        # write out the data
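        # (sketch) one way to append the row, mirroring the write step in
        # Example #6 below; savepreprocess is assumed to be a csv path defined
        # earlier in the script, and csv is assumed to be imported
        with open(savepreprocess, 'a+', newline='', encoding='utf-8') as csvfile:
            csv_write = csv.writer(csvfile)
            csv_write.writerow(buffer)  # one row per clip: label followed by samples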
Example #3
import scipy.signal as signal
import numpy as np
import librosa as lib
import matplotlib.pyplot as plt
import time
from statsmodels.tsa import stattools
import preprocessing
import feature_extraction as fe

ex = '..\\..\\boy_and_girl\\class1\\arctic_a0012.wav'
# ex = '..\\..\\cello_and_viola\\viola\\Viola.arco.ff.sulA.A4.stereo.aiff'
time_series, fs = lib.load(ex, sr=None, mono=True, res_type='kaiser_best')

time_series = preprocessing.avoid_overlap(time_series,
                                          N=100,
                                          f=500,
                                          fs=fs,
                                          plot=False)
time_series = preprocessing.downsample(time_series, fs, 4410)

print(fs)
# frame length/hop are based on the 4410 Hz rate of the downsampled signal
frames = preprocessing.frame(time_series, int(0.03 * 4410), int(0.015 * 4410))
for i in range(frames.shape[1]):
    acf1 = stattools.acf(frames[:, i], nlags=100)
    fft, _ = fe.fft_singleside(frames[:, i], 4410, 8096)

    plt.figure()
    plt.subplot(211)
    plt.plot(np.abs(fft))
    plt.subplot(212)
    plt.stem(acf1)
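preprocessing.frame is used above as returning one frame per column (frames[:, i]). A minimal sketch under that assumption, treating the third argument as the hop between frame starts; the project's own helper may differ:

import numpy as np

def frame(x, frame_length, frame_hop):
    # split x into overlapping frames, one frame per column (sketch)
    n_frames = 1 + (len(x) - frame_length) // frame_hop
    out = np.empty((frame_length, n_frames))
    for k in range(n_frames):
        out[:, k] = x[k * frame_hop:k * frame_hop + frame_length]
    return out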
Example #4
import librosa as lib
import matplotlib.pyplot as plt
import time
from preprocess_filter import *
import preprocessing
import feature_extraction as fe

# ex = '..\\..\\数据集2\\pre2012\\bflute\\BassFlute.ff.C5B5.aiff'
ex = '..\\..\\cello_and_viola\\viola\\Viola.arco.ff.sulA.A4.stereo.aiff'
x, fs = lib.load(ex, sr=None, mono=True, res_type='kaiser_best')
x = x[1000:2500]
downsample_rate = 22050

avoid_overlap = preprocessing.avoid_overlap(x,
                                            N=20,
                                            f=downsample_rate / 2,
                                            fs=fs,
                                            plot=False)
downsample = preprocessing.downsample(avoid_overlap, fs, downsample_rate)

S1, f1 = fe.fft_singleside(x, fs, n=8192)
S2, f2 = fe.fft_singleside(avoid_overlap, fs, n=8192)
S3, f3 = fe.fft_singleside(downsample, downsample_rate, n=8192)

plt.figure()
ax1 = plt.subplot(211)
ax1.set_xlabel('time')
ax1.set_ylabel('mag')
ax1.set_title('original signal')
plt.plot(x)
ax2 = plt.subplot(212)
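fe.fft_singleside is called in Examples #3 and #4 as fft_singleside(x, fs, n), and its return values are used as a (spectrum, frequency-axis) pair. A minimal sketch under that assumed interface, built on numpy's real FFT; it is not necessarily the project's implementation:

import numpy as np

def fft_singleside(x, fs, n):
    # single-sided spectrum of x computed with an n-point FFT (sketch)
    spectrum = np.fft.rfft(x, n=n)          # non-negative frequency bins only
    freqs = np.fft.rfftfreq(n, d=1.0 / fs)  # matching frequency axis in Hz
    return spectrum, freqs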
Example #5
for i in range(1):
    subpath = path + '\\' + labelname[i]
    subfilename = os.listdir(subpath)
    sample_num.append(len(subfilename))   # file count, which is also the sample count
    audio_list = []  # avoid shadowing the built-in list
    audio_data, sample_rate = librosa.load(
        subpath + '\\' + subfilename[0], sr=None, mono=True, res_type='kaiser_best')
    audio_list.append(audio_data.tolist())
    audio_data_set[labelname[i]] = audio_list

for i in range(1):
    data = np.array(audio_data_set[labelname[i]][0])
    pre_amphasis = preprocessing.pre_emphasis(data, 0.97, pic='pre_amphasis'+str(i))
    avoid_overlap = preprocessing.avoid_overlap(pre_amphasis,
                                                N=10,
                                                f=11000,
                                                fs=sample_rate,
                                                plot=True)
    downsample = preprocessing.downsample(avoid_overlap, sample_rate, downsample_rate)
    start = time.perf_counter()
    silence_remove = preprocessing.silence_remove(
        downsample,
        limit=0.001,
        option='filter',  # the option is a string, not the built-in filter
        pic='silence_remove_filter'+str(i),
        N=10,
        f=100,
        fs=downsample_rate,
        plot=True)
    end = time.perf_counter()
    print(end-start)
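preprocessing.silence_remove is called with an envelope threshold (limit) and either a 'hilbert' option (Examples #2 and #6) or a 'filter' option with a filter order N and cutoff f (this example). A rough stand-in showing both envelope choices under those assumptions; pic/plot are accepted but ignored here:

import numpy as np
import scipy.signal as signal

def silence_remove(x, limit, option='hilbert', N=10, f=100, fs=None,
                   pic=None, plot=False):
    # keep only samples whose envelope exceeds limit (sketch)
    if option == 'hilbert':
        envelope = np.abs(signal.hilbert(x))  # magnitude of the analytic signal
    else:  # 'filter': smooth |x| with an order-N low-pass at cutoff f
        sos = signal.butter(N, f / (fs / 2), btype='low', output='sos')
        envelope = signal.sosfiltfilt(sos, np.abs(x))
    return x[envelope > limit]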
Example #6
import csv
import os

import librosa
import numpy as np

import preprocessing


def load_and_preprocess(amphasis, down, clip, factor, saveprojectpath,
                        savedata, savepic, savetestdata, savepreprocess,
                        savefeature, path, downsample_rate, frame_time,
                        frame_length, frame_overlap, test_rate):
    if not os.path.exists('..\\仿真结果'):
        os.mkdir('..\\仿真结果')
    if not os.path.exists(saveprojectpath):
        os.mkdir(saveprojectpath)
    if not os.path.exists(savedata):  # folder for saved data
        os.mkdir(savedata)
    if not os.path.exists(savepic):  # folder for saved figures
        os.mkdir(savepic)
    if not os.path.exists(savetestdata):  # folder for saved test data
        os.mkdir(savetestdata)

    # write out the data
    with open(savepreprocess, 'w',
              encoding='utf-8') as csvfile:  # create the csv file empty first; rows are appended later
        writer = csv.writer(csvfile)
    # read the audio file directory
    labelname = os.listdir(path)  # subfolder names under path, which are also the label names
    for j in range(len(labelname)):
        subpath = path + '\\' + labelname[j]  # subdirectory of the dataset for this label
        subfilename = os.listdir(subpath)  # list the audio files
        for i in range(len(subfilename)):
            audio_data, sample_rate = librosa.load(
                subpath + '\\' + subfilename[i],
                sr=None,
                mono=True,
                res_type='kaiser_best')  # load the audio file
            audio_data = librosa.util.normalize(audio_data,
                                                norm=np.inf,
                                                axis=0,
                                                threshold=None,
                                                fill=None)
            # audio_data = audio_data[0:8820]
            ##########################################################################################################################
            if amphasis:
                pre_amphasis = preprocessing.pre_emphasis(audio_data,
                                                          0.97,
                                                          pic=None)
                # pic=savepic + '\\' + 'pre_amphasis_'+str(j)+'_'+str(i))
            else:
                pre_amphasis = audio_data

            if down:
                avoid_overlap = preprocessing.avoid_overlap(pre_amphasis,
                                                            N=20,
                                                            f=downsample_rate / 2,
                                                            fs=sample_rate,
                                                            plot=False)
                downsample = preprocessing.downsample(avoid_overlap,
                                                      sample_rate,
                                                      downsample_rate)
            else:
                downsample = pre_amphasis

            if clip == 'hilbert':
                silence_remove = preprocessing.silence_remove(
                    x=downsample,
                    limit=np.max(downsample) * factor,
                    fs=downsample_rate,
                    option='hilbert',
                    # pic=savepic + '\\' + 'silence_remove_hilbert_' + str(j)+'_'+str(i))
                    pic=None)
            elif clip == 'HF':
                silence_remove = preprocessing.silence_remove(
                    x=downsample,
                    limit=np.max(downsample) * factor,
                    fs=downsample_rate,
                    option='HF',
                    # pic=savepic + '\\' + 'silence_remove_hilbert_filter_' + str(j)+'_'+str(i))
                    pic=None)
            elif clip == 'filter':
                silence_remove = preprocessing.silence_remove(
                    x=downsample,
                    limit=0.02,
                    option='filter',
                    pic=savepic + '\\' + 'silence_remove_filter_' + str(j) +
                    '_' + str(i),
                    N=10,
                    f=600,
                    fs=downsample_rate)
            else:
                silence_remove = downsample

########################################################################################################################
# j is the label index, i is the file index within that label
            buffer = [j] + [i] + list(silence_remove)
            with open(savepreprocess, 'a+', newline='',
                      encoding='utf-8') as csvfile:
                csv_write = csv.writer(csvfile)
                csv_write.writerow(buffer)  # write one row per audio clip

            print('preprocessing:', j, i)

    return labelname
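A hypothetical call showing how the flags select the preprocessing steps; every path below is a placeholder, and the frame/test parameters are only passed through because the signature requires them:

labelname = load_and_preprocess(
    amphasis=True,                  # apply pre-emphasis
    down=True,                      # anti-alias and downsample
    clip='hilbert',                 # silence removal via the Hilbert envelope
    factor=0.05,                    # threshold = 5% of each clip's peak amplitude
    saveprojectpath='..\\仿真结果\\demo',
    savedata='..\\仿真结果\\demo\\data',
    savepic='..\\仿真结果\\demo\\pic',
    savetestdata='..\\仿真结果\\demo\\testdata',
    savepreprocess='..\\仿真结果\\demo\\data\\preprocess.csv',
    savefeature='..\\仿真结果\\demo\\data\\feature.csv',
    path='..\\dataset',             # one subfolder per label, each holding audio files
    downsample_rate=22050,
    frame_time=0.03,
    frame_length=661,
    frame_overlap=330,
    test_rate=0.3)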